ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth)
---|---|---|---|---|---|---|---|
1,200 | cpp | tensorflow/tensorflow | func | third_party/xla/xla/mlir/tools/mlir_bisect/rewrites/func.cc | tensorflow/compiler/mlir/quantization/common/func_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_COMMON_FUNC_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_COMMON_FUNC_H_
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
namespace mlir::quant {
func::FuncOp FindMainFuncOp(ModuleOp module_op);
}
#endif
#include <dlfcn.h>
#include <tuple>
#include <type_traits>
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/tools/mlir_interpreter/dialects/util.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include "xla/mlir/tools/mlir_interpreter/framework/registration.h"
namespace mlir {
namespace interpreter {
namespace {
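// Returns true if the MLIR `type` corresponds to the C++ type T
// (float <-> f32, double <-> f64); all other pairings are rejected.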
template <typename T>
bool TypeMatches(mlir::Type type) {
if constexpr (std::is_same_v<T, float>) {
return type.isF32();
} else if constexpr (std::is_same_v<T, double>) {
return type.isF64();
} else {
return false;
}
}
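// Variadic matcher: returns true if `types` matches the C++ type pack
// element-wise. The unused Dummy parameter keeps the empty base case a
// distinct template from the recursive overload below.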
template <typename Dummy>
bool TypesMatch(ArrayRef<mlir::Type> types) {
return types.empty();
}
template <typename Dummy, typename T, typename... R>
bool TypesMatch(ArrayRef<mlir::Type> types) {
if (types.empty() || !TypeMatches<T>(types.front())) return false;
return TypesMatch<Dummy, R...>(types.drop_front());
}
template <int n, typename... Args>
using Arg = std::tuple_element_t<n, std::tuple<Args...>>;
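// Attempts to invoke the native symbol `sym` as Ret(Args...). Returns false
// unless the callee's MLIR signature matches the template signature; on
// success the call result is stored in `ret`.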
template <typename Ret, typename... Args>
bool TryCall(void* sym, func::FuncOp callee,
MutableArrayRef<InterpreterValue> args, InterpreterValue& ret) {
if (args.size() != callee.getNumArguments() || callee.getNumResults() != 1) {
return false;
}
if (!TypeMatches<Ret>(callee.getResultTypes()[0])) {
return false;
}
if (!TypesMatch<void, Args...>(callee.getArgumentTypes())) {
return false;
}
static_assert(sizeof...(Args) <= 2);
using FnType = Ret (*)(Args...);
auto fn = reinterpret_cast<FnType>(sym);
constexpr int n = sizeof...(Args);
if constexpr (n == 1) {
ret = {fn(std::get<Arg<0, Args...>>(args[0].storage))};
} else {
static_assert(n == 2);
ret = {fn(std::get<Arg<0, Args...>>(args[0].storage),
std::get<Arg<1, Args...>>(args[1].storage))};
}
return true;
}
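// Interprets func.call: if the callee has a body, it is interpreted directly;
// otherwise the symbol is resolved with dlsym and invoked through a small set
// of supported float/double signatures.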
llvm::SmallVector<InterpreterValue> Call(MutableArrayRef<InterpreterValue> args,
mlir::Operation* op,
InterpreterState& state) {
auto call = llvm::cast<func::CallOp>(op);
auto callee =
llvm::cast<func::FuncOp>(state.GetSymbols().lookup(call.getCallee()));
if (callee->getRegion(0).hasOneBlock()) {
return Interpret(state, callee.getRegion(), args);
}
void* sym = dlsym(RTLD_DEFAULT, callee.getSymName().str().c_str());
if (sym == nullptr) {
state.AddFailure("callee not found");
return {};
}
InterpreterValue result;
if (TryCall<float, float>(sym, callee, args, result) ||
TryCall<float, float, float>(sym, callee, args, result) ||
TryCall<double, double>(sym, callee, args, result) ||
TryCall<double, double, double>(sym, callee, args, result)) {
return {result};
}
state.AddFailure("unsupported call target");
return {};
}
REGISTER_MLIR_INTERPRETER_OP("func.call", Call);
REGISTER_MLIR_INTERPRETER_OP("func.return", NoOpTerminator);
}
}
} | #include "tensorflow/compiler/mlir/quantization/common/func.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
namespace mlir::quant {
namespace {
using ::testing::IsNull;
using ::testing::NotNull;
using FindMainFuncOpTest = ::mlir::quant::QuantizationTestBase;
TEST_F(FindMainFuncOpTest, ReturnsMainFuncOp) {
constexpr absl::string_view kModuleWithMainFunc = R"mlir(
module {
func.func @main() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleWithMainFunc);
EXPECT_THAT(*module_op, NotNull());
func::FuncOp main_func_op = FindMainFuncOp(*module_op);
EXPECT_THAT(main_func_op, NotNull());
}
TEST_F(FindMainFuncOpTest, ReturnsNullWhenMainFuncOpIsPrivate) {
constexpr absl::string_view kModuleWithPrivateMainFunc = R"mlir(
module {
func.func private @main() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithPrivateMainFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), IsNull());
}
TEST_F(FindMainFuncOpTest, ReturnsServingDefaultFuncOp) {
constexpr absl::string_view kModuleWithServingDefaultFunc = R"mlir(
module {
func.func @serving_default() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithServingDefaultFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), NotNull());
}
TEST_F(FindMainFuncOpTest, ReturnsNullWhenServingDefaultFuncOpIsPrivate) {
constexpr absl::string_view kModuleWithPrivateServingDefaultFunc = R"mlir(
module {
func.func private @serving_default() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithPrivateServingDefaultFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), IsNull());
}
TEST_F(FindMainFuncOpTest, ReturnsNullWhenMainFuncNotFound) {
constexpr absl::string_view kModuleWithNoMainFunc = R"mlir(
module {
func.func @foo() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleWithNoMainFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), IsNull());
}
}
} |
1,201 | cpp | tensorflow/tensorflow | lift_as_function_call | tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc | tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_COMMON_LIFT_AS_FUNCTION_CALL_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_COMMON_LIFT_AS_FUNCTION_CALL_H_
#include "absl/base/nullability.h"
#include "absl/status/statusor.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Bytecode/BytecodeOpInterface.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir::quant {
inline constexpr StringRef kFusedFunctionAttr = "tf_quant.composite_function";
inline constexpr StringRef kNullAttributeValue = "N/A";
constexpr StringRef kQuantizedFuncPrefix = "quantized_";
constexpr StringRef kCompositeFuncPrefix = "composite_";
inline constexpr StringRef kOriginalStablehloEntryFunctionAttrName =
"_original_entry_function";
enum FunctionCallOpType { TFPartitionedCallOp = 0, TFXlaCallModuleOp = 1 };
bool IsInLiftedFunc(Operation* op);
bool IsInStableHloOpRegion(Operation* op);
bool IsEinsumSupportedByXlaDotV2(StringAttr equation_attr);
absl::StatusOr<::stablehlo::quantization::Method> GetQuantizationMethod(
absl::Nonnull<Operation*> op);
::stablehlo::quantization::Method GetQuantizationMethodOrDefault(
absl::Nonnull<Operation*> op);
SmallVector<Value, 4> LiftAsFunctionCall(OpBuilder& builder, Location location,
FunctionCallOpType call_op_type,
StringRef func_name,
ArrayRef<Value> arguments,
ArrayRef<Value> results,
ArrayRef<NamedAttribute> attributes);
SmallVector<Value, 4> LiftAsFunctionCall(OpBuilder& builder, Location location,
FunctionCallOpType call_op_type,
StringRef func_name,
ArrayRef<Value> arguments,
ArrayRef<Value> results);
SmallVector<Value> AppendToVector(ArrayRef<Value> arguments, Value append);
bool HasWeightOnlyPtqMethod(TF::XlaCallModuleOp xla_call_module_op);
bool IsWeightOnlyQuantizableOp(const Operation& op);
SmallVector<func::FuncOp> GetSortedFunctions(ModuleOp module_op);
}
#endif
#include "tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <queue>
#include <stack>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorHandling.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/TypeRange.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/quantization_unit_loc.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_call_module_attrs.h"
#include "tensorflow/core/ir/types/dialect.h"
#include "tensorflow/core/platform/mutex.h"
#include "tsl/platform/protobuf.h"
namespace mlir::quant {
using ::stablehlo::quantization::Method;
using ::tsl::protobuf::TextFormat;
constexpr int64_t kDefaultVersion = 9;
constexpr StringRef kPlatformCpu = "CPU";
constexpr StringRef kStablehloModuleAttrsAttrName = "_stablehlo_module_attrs";
constexpr StringRef kUsesShapePolymorphismAttr = "jax.uses_shape_polymorphism";
bool IsInLiftedFunc(Operation* op) {
if (op == nullptr) return false;
return op->getParentOfType<func::FuncOp>()->hasAttr(kFusedFunctionAttr);
}
bool IsInStableHloOpRegion(Operation* op) {
if (op == nullptr) return false;
auto parent_op = op->getParentOp();
return parent_op != nullptr && stablehlo::IsStablehloOp(parent_op);
}
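// Inserts `function` into the module's symbol table, appending a numeric
// suffix to `func_name` until the name is unique. A process-wide mutex guards
// against concurrent uniquing.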
StringAttr InsertToSymbolTable(Operation& module, Operation& function,
const StringRef func_name) {
static tensorflow::mutex* mtx = new tensorflow::mutex();
tensorflow::mutex_lock lock(*mtx);
SymbolTable symbol_table(&module);
std::string unique_name = func_name.str();
int32_t uniquing_counter = 0;
while (symbol_table.lookup(unique_name) != nullptr) {
++uniquing_counter;
unique_name = absl::StrCat(func_name.str(), "_", uniquing_counter);
}
function.setAttr("sym_name",
StringAttr::get(module.getContext(), unique_name));
return symbol_table.insert(&function);
}
ValueRange CreateTFPartitionedCallOp(OpBuilder& builder,
const Location location,
const StringRef func_name,
const TypeRange output_types,
const ValueRange args) {
TF::PartitionedCallOp call_op = builder.create<TF::PartitionedCallOp>(
location, output_types, args,
FlatSymbolRefAttr::get(builder.getStringAttr(func_name)),
"", "", "");
call_op->setAttr(
kQuantTraitAttrName,
builder.getStringAttr(StringRef(
std::string(QuantTraitValues[QuantizationTrait::FullyQuantizable]))));
return call_op.getOutput();
}
ValueRange CreateTFXlaCallModuleOp(OpBuilder& builder, const Location location,
const StringRef func_name,
const TypeRange output_types,
const ValueRange args) {
MLIRContext* ctx = builder.getContext();
SmallVector<Attribute> shape_attrs;
for (const Type result_type : output_types) {
shape_attrs.push_back(
tf_type::ShapeAttr::get(ctx, mlir::cast<ShapedType>(result_type)));
}
auto empty_array_attr = ArrayAttr::get(ctx, {});
auto platforms = ArrayAttr::get(ctx, {StringAttr::get(ctx, kPlatformCpu)});
auto call_op = builder.create<TF::XlaCallModuleOp>(
location,
output_types,
args,
kDefaultVersion, "",
ArrayAttr::get(ctx, shape_attrs),
empty_array_attr,
platforms,
empty_array_attr,
false,
empty_array_attr);
call_op->setAttr(TF::kStablehloEntryFunctionAttrName,
FlatSymbolRefAttr::get(builder.getStringAttr(func_name)));
call_op->setAttr(kOriginalStablehloEntryFunctionAttrName,
builder.getStringAttr(func_name));
call_op->setAttr(
kQuantTraitAttrName,
builder.getStringAttr(StringRef(
std::string(QuantTraitValues[QuantizationTrait::FullyQuantizable]))));
call_op->setAttr(kStablehloModuleAttrsAttrName,
builder.getDictionaryAttr(builder.getNamedAttr(
kUsesShapePolymorphismAttr, builder.getBoolAttr(true))));
return call_op.getOutput();
}
ValueRange CreateFunctionCallOp(OpBuilder& builder, const Location location,
const FunctionCallOpType call_op_type,
const StringRef func_name,
const TypeRange output_types,
const ValueRange args) {
switch (call_op_type) {
case FunctionCallOpType::TFXlaCallModuleOp:
return CreateTFXlaCallModuleOp(builder, location, func_name, output_types,
args);
case FunctionCallOpType::TFPartitionedCallOp:
return CreateTFPartitionedCallOp(builder, location, func_name,
output_types, args);
}
}
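// Walks the use-def chain backwards from `results`, stopping at `arguments`,
// and returns the visited ops ordered so that defining ops precede their
// users when cloned into the lifted function.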
SmallVector<Operation*> FindOpsFromArgumentsToResults(
const ArrayRef<Value> arguments, const ArrayRef<Value> results) {
std::queue<Value> value_queue;
for (Value result : results) {
value_queue.push(result);
}
absl::flat_hash_set<mlir::detail::ValueImpl*> argument_set;
for (Value argument : arguments) {
argument_set.insert(argument.getImpl());
}
std::stack<Operation*> op_stack;
while (!value_queue.empty()) {
Value current_value = value_queue.front();
value_queue.pop();
Operation* defining_node = current_value.getDefiningOp();
if (defining_node == nullptr) continue;
op_stack.push(defining_node);
for (Value arg : defining_node->getOperands()) {
if (!argument_set.contains(arg.getImpl())) {
value_queue.push(arg);
}
}
}
SmallVector<Operation*> sorted_ops;
absl::flat_hash_set<Operation*> unique_ops;
while (!op_stack.empty()) {
Operation* current_op = op_stack.top();
op_stack.pop();
if (unique_ops.contains(current_op)) continue;
sorted_ops.push_back(current_op);
unique_ops.insert(current_op);
}
return sorted_ops;
}
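// Associates each entry in `attributes` with the op among `ops` that owns an
// attribute of the same name. StableHLO ops receive the attribute directly;
// other ops get an "index:name" entry appended to their attr-map string for
// later propagation. Attributes whose value is "N/A" are skipped.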
LogicalResult SetAttributeMap(MLIRContext& context,
const ArrayRef<NamedAttribute> attributes,
const ArrayRef<Operation*> ops) {
llvm::SmallDenseMap<NamedAttribute, Operation*> attr_to_op_map;
for (Operation* op : ops) {
for (const NamedAttribute named_attr : op->getAttrs()) {
attr_to_op_map.insert({named_attr, op});
}
}
for (int idx : llvm::seq<int>(0, attributes.size())) {
const NamedAttribute& attribute = attributes[idx];
if (const auto string_attr =
mlir::dyn_cast_or_null<StringAttr>(attribute.getValue());
string_attr != nullptr &&
string_attr.getValue() == kNullAttributeValue) {
continue;
}
if (std::find_if(
attr_to_op_map.begin(), attr_to_op_map.end(), [&](auto attr_op) {
return std::get<0>(attr_op).getName() == attribute.getName();
}) == attr_to_op_map.end()) {
emitError(UnknownLoc::get(&context),
"Could not find attribute: " + attribute.getName().str());
return failure();
}
Operation* owner_op;
for (const auto& [attr, val] : attr_to_op_map) {
if (attr.getName() == attribute.getName()) owner_op = val;
}
if (stablehlo::IsStablehloOp(owner_op)) {
owner_op->setAttr(StringRef(attribute.getName()), attribute.getValue());
} else {
owner_op = attr_to_op_map[attribute];
std::string new_attr_map_str{};
if (owner_op->hasAttr(kAttrMapAttribute)) {
new_attr_map_str =
owner_op->getAttrOfType<StringAttr>(kAttrMapAttribute).str();
absl::StrAppend(&new_attr_map_str, ",");
}
const std::string identifier = std::to_string(idx);
const StringAttr attribute_name = attribute.getName();
absl::StrAppend(&new_attr_map_str, identifier, ":", attribute_name.str());
owner_op->setAttr(kAttrMapAttribute,
StringAttr::get(&context, new_attr_map_str));
}
}
return success();
}
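// Lifts the ops between `arguments` and `results` into a new private function
// and replaces them at the original site with a call op of `call_op_type`,
// returning the call op's results.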
SmallVector<Value, 4> LiftAsFunctionCall(
OpBuilder& builder, const Location location,
const FunctionCallOpType call_op_type, const StringRef func_name,
const ArrayRef<Value> arguments, const ArrayRef<Value> results,
const ArrayRef<NamedAttribute> attributes) {
MLIRContext* context = builder.getContext();
if (results.empty()) {
emitError(UnknownLoc::get(context), "No result values specified");
return {};
}
Operation* result_op = results[0].getDefiningOp();
auto module = result_op->getParentOfType<ModuleOp>();
auto current_func = result_op->getParentOfType<func::FuncOp>();
auto guard = OpBuilder::InsertionGuard(builder);
builder.setInsertionPointAfter(current_func);
TypeRange arg_types{ValueRange{arguments}};
TypeRange result_types{ValueRange{results}};
auto func_type = FunctionType::get(context, arg_types, result_types);
SmallVector<Location> arg_locs;
for (Value arg : arguments) {
arg_locs.push_back(arg.getLoc());
}
auto wrap_func = builder.create<func::FuncOp>(location, func_name, func_type);
wrap_func.setVisibility(SymbolTable::Visibility::Private);
if (call_op_type == FunctionCallOpType::TFXlaCallModuleOp) {
wrap_func->setAttr(TF::kFromXlaCallModuleAttrName, builder.getUnitAttr());
}
wrap_func->setAttr(kFusedFunctionAttr, builder.getUnitAttr());
builder.createBlock(&wrap_func.getBody(), wrap_func.begin(), arg_types,
arg_locs);
IRMapping mapping;
for (int32_t i : llvm::seq<int32_t>(0, arguments.size())) {
mapping.map(arguments[i], wrap_func.getArgument(i));
}
auto cloning_ops = FindOpsFromArgumentsToResults(arguments, results);
Location call_op_loc = location;
for (Operation* op : cloning_ops) {
std::optional<QuantizationUnitLoc::QuantizationUnit> unit =
FindQuantizationUnitFromLoc(op->getLoc());
if (unit.has_value()) {
call_op_loc = QuantizationUnitLoc(builder.getContext(), unit.value());
}
}
if (failed(SetAttributeMap(*context, attributes, cloning_ops))) {
current_func.emitError() << "Some attributes couldn't be found.";
}
for (Operation* op : cloning_ops) {
builder.clone(*op, mapping);
}
SmallVector<Value> return_values;
for (Value result : results) {
return_values.push_back(mapping.lookupOrNull(result));
}
builder.create<func::ReturnOp>(location, return_values);
StringAttr new_func_name =
InsertToSymbolTable(*module, *wrap_func, func_name);
builder.setInsertionPointAfter(result_op);
ValueRange new_results =
CreateFunctionCallOp(builder, call_op_loc, call_op_type,
new_func_name.getValue(), result_types, arguments);
return SmallVector<Value, 4>(new_results.begin(), new_results.end());
}
SmallVector<Value, 4> LiftAsFunctionCall(OpBuilder& builder,
const Location location,
const FunctionCallOpType call_op_type,
const StringRef func_name,
const ArrayRef<Value> arguments,
const ArrayRef<Value> results) {
SmallVector<NamedAttribute> attributes;
return LiftAsFunctionCall(builder, location, call_op_type, func_name,
arguments, results, attributes);
}
SmallVector<Value> AppendToVector(const ArrayRef<Value> arguments,
Value append) {
SmallVector<Value> ret(arguments);
ret.push_back(append);
return ret;
}
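// Returns true if the einsum `equation` can be lowered to XlaDotV2: exactly
// two operands, no ellipsis ('.'), batch dimensions shared by both operands
// and the output, and output labels laid out as batch dims followed by LHS
// free dims followed by RHS free dims.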
bool IsEinsumSupportedByXlaDotV2(StringAttr equation_attr) {
StringRef equation = equation_attr.getValue();
if (!absl::StrContains(equation, "->") || !absl::StrContains(equation, ",") ||
absl::StrContains(equation, ".")) {
return false;
}
int idx_arrow = equation.find("->");
StringRef calc_eq = equation.substr(0, idx_arrow);
StringRef out_eq = equation.substr(idx_arrow + 2);
int idx_comma = calc_eq.find(',');
StringRef lhs_eq = calc_eq.substr(0, idx_comma);
StringRef rhs_eq = calc_eq.substr(idx_comma + 1);
if (absl::StrContains(rhs_eq, ",")) return false;
int lhs_out_idx_start = out_eq.size();
int lhs_out_idx_end = -1;
int rhs_out_idx_start = out_eq.size();
int rhs_out_idx_end = -1;
int lhs_batch_dim_size = 0;
int rhs_batch_dim_size = 0;
for (const char c : lhs_eq) {
if (absl::StrContains(out_eq, c) && absl::StrContains(rhs_eq, c)) {
lhs_batch_dim_size++;
} else if (absl::StrContains(out_eq, c)) {
const int out_idx = out_eq.find(c);
if (out_idx < lhs_out_idx_end) {
return false;
}
lhs_out_idx_start = std::min(lhs_out_idx_start, out_idx);
lhs_out_idx_end = std::max(lhs_out_idx_end, out_idx);
}
}
for (const char c : rhs_eq) {
if (absl::StrContains(out_eq, c) && absl::StrContains(lhs_eq, c)) {
rhs_batch_dim_size++;
} else if (absl::StrContains(out_eq, c)) {
int out_idx = out_eq.find(c);
if (out_idx < rhs_out_idx_end) {
return false;
}
if (out_idx < rhs_out_idx_start) rhs_out_idx_start = out_idx;
if (out_idx > rhs_out_idx_end) rhs_out_idx_end = out_idx;
}
}
if (lhs_batch_dim_size != rhs_batch_dim_size && lhs_batch_dim_size != 0 &&
rhs_batch_dim_size != 0) {
return false;
}
if (lhs_out_idx_end > rhs_out_idx_start) return false;
int batch_dim_size = std::max(rhs_batch_dim_size, lhs_batch_dim_size);
return lhs_out_idx_start >= batch_dim_size &&
rhs_out_idx_start >= batch_dim_size;
}
absl::StatusOr<Method> GetQuantizationMethod(absl::Nonnull<Operation*> op) {
const auto quantization_method_attr =
op->getAttrOfType<StringAttr>(kQuantizationMethodAttr);
if (!quantization_method_attr) {
return absl::InvalidArgumentError(absl::StrCat(
"Attribute ", kQuantizationMethodAttr.str(), " is not found."));
}
Method quantization_method;
const std::string method_txtpb = quantization_method_attr.getValue().str();
if (!TextFormat::ParseFromString(method_txtpb, &quantization_method)) {
return absl::InternalError(
absl::StrCat("Failed to parse Method from textproto: ", method_txtpb));
}
return quantization_method;
}
Method GetQuantizationMethodOrDefault(absl::Nonnull<Operation*> op) {
absl::StatusOr<Method> method = GetQuantizationMethod(op);
if (method.status().code() == absl::StatusCode::kInternal) {
op->emitError(absl::StrCat("Failed to get quantization method: ",
method.status().ToString()));
}
return method.ok() ? *method : Method::default_instance();
}
bool HasWeightOnlyPtqMethod(TF::XlaCallModuleOp xla_call_module_op) {
Method method = GetQuantizationMethodOrDefault(xla_call_module_op);
return method.has_weight_only_ptq();
}
bool IsWeightOnlyQuantizableOp(const Operation& op) {
if (auto call_op = dyn_cast<TF::XlaCallModuleOp>(op)) {
StringRef entry_function_name = GetEntryFunctionName(call_op);
absl::StatusOr<Method> quantization_method = GetQuantizationMethod(call_op);
return ContainsConvOrDot(entry_function_name) && quantization_method.ok() &&
quantization_method->has_weight_only_ptq();
}
return false;
}
SmallVector<func::FuncOp> GetSortedFunctions(ModuleOp module_op) {
auto iterator_range = module_op.getOps<func::FuncOp>();
SmallVector<func::FuncOp> func_ops(iterator_range.begin(),
iterator_range.end());
absl::c_sort(func_ops, [](func::FuncOp op1, func::FuncOp op2) {
return op1.getName() < op2.getName();
});
return func_ops;
}
} | #include "tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant {
namespace {
using ::stablehlo::quantization::Method;
using ::testing::HasSubstr;
using ::testing::NotNull;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::util::MessageDifferencer;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
using LiftAsFunctionCallTest = QuantizationTestBase;
constexpr absl::string_view kModuleLifted = R"mlir(
module {
func.func private @composite_dot_general_fn_1(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [DEFAULT, DEFAULT] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
TEST_F(LiftAsFunctionCallTest, LiftedFunctionSucceeds) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleLifted);
ASSERT_TRUE(module_op);
auto composite_dot_general_fn =
module_op->lookupSymbol<func::FuncOp>("composite_dot_general_fn_1");
ASSERT_THAT(composite_dot_general_fn, NotNull());
auto dot_general_op = FindOperationOfType<mlir::stablehlo::DotGeneralOp>(
composite_dot_general_fn);
EXPECT_TRUE(IsInLiftedFunc(dot_general_op));
}
constexpr absl::string_view kModuleStableHlo = R"mlir(
module {
func.func @main(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
TEST_F(LiftAsFunctionCallTest, FunctionLiftedAsXlaCallModuleOp) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStableHlo);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op =
FindOperationOfType<mlir::stablehlo::DotGeneralOp>(main_fn);
const SmallVector<NamedAttribute>& attributes = {
builder_.getNamedAttr(
"precision_config",
builder_.getArrayAttr(SmallVector<Attribute>(
1, mlir::stablehlo::PrecisionAttr::get(
ctx_.get(), mlir::stablehlo::Precision::DEFAULT)))),
};
const SmallVector<Value> operands(dot_general_op->getOperands());
const SmallVector<Value> results(dot_general_op->getResults());
Operation* lifted_op =
LiftAsFunctionCall(builder_, dot_general_op->getLoc(),
FunctionCallOpType::TFXlaCallModuleOp,
"composite_dot_general_fn", operands, results,
attributes)[0]
.getDefiningOp();
const auto entry_function_symbol_ref =
lifted_op->getAttrOfType<FlatSymbolRefAttr>("_entry_function");
SymbolTable symbol_table(*module_op);
auto entry_func = dyn_cast_or_null<func::FuncOp>(
symbol_table.lookup(entry_function_symbol_ref.getValue()));
auto lifted_dot_general_op =
FindOperationOfType<mlir::stablehlo::DotGeneralOp>(entry_func);
EXPECT_TRUE(isa<TF::XlaCallModuleOp>(lifted_op));
EXPECT_EQ(
mlir::cast<StringAttr>(lifted_op->getAttr("_original_entry_function")),
"composite_dot_general_fn_1");
EXPECT_EQ(
mlir::cast<ArrayAttr>(lifted_dot_general_op->getAttr("precision_config")),
builder_.getArrayAttr(SmallVector<Attribute>(
1, mlir::stablehlo::PrecisionAttr::get(
ctx_.get(), mlir::stablehlo::Precision::DEFAULT))));
}
TEST_F(LiftAsFunctionCallTest, FunctionNoAttrLiftedAsXlaCallModuleOp) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStableHlo);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op =
FindOperationOfType<mlir::stablehlo::DotGeneralOp>(main_fn);
const SmallVector<Value> operands(dot_general_op->getOperands());
const SmallVector<Value> results(dot_general_op->getResults());
Operation* lifted_op =
LiftAsFunctionCall(builder_, dot_general_op->getLoc(),
FunctionCallOpType::TFXlaCallModuleOp,
"composite_dot_general_fn", operands, results)[0]
.getDefiningOp();
EXPECT_TRUE(isa<TF::XlaCallModuleOp>(lifted_op));
EXPECT_EQ(
mlir::cast<StringAttr>(lifted_op->getAttr("_original_entry_function")),
"composite_dot_general_fn_1");
}
TEST_F(LiftAsFunctionCallTest, EinsumSupportedForXlaDotV2Succeeds) {
StringAttr einsum_supported_by_xla_dot_v2_attr =
builder_.getStringAttr("ijk,ikm->ijm");
StringAttr einsum_one_operand = builder_.getStringAttr("ijk->ikj");
StringAttr einsum_ellipsis = builder_.getStringAttr("...gse->...gs");
EXPECT_TRUE(IsEinsumSupportedByXlaDotV2(einsum_supported_by_xla_dot_v2_attr));
EXPECT_FALSE(IsEinsumSupportedByXlaDotV2(einsum_one_operand));
EXPECT_FALSE(IsEinsumSupportedByXlaDotV2(einsum_ellipsis));
}
TEST_F(LiftAsFunctionCallTest, GetQuantizationMethodSucceeds) {
constexpr absl::string_view kXlaCallModuleOpWithQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _quantization_method = "no_quantization {}", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithQuantizationMethodAttr);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_ops = main_fn.getOps<TF::XlaCallModuleOp>();
ASSERT_FALSE(xla_call_module_ops.empty());
const absl::StatusOr<Method> method =
GetQuantizationMethod(*xla_call_module_ops.begin());
ASSERT_THAT(method, IsOk());
EXPECT_TRUE(method->has_no_quantization());
}
TEST_F(LiftAsFunctionCallTest,
GetQuantizationMethodFailsWhenNoQuantizationMethodAttr) {
constexpr absl::string_view kXlaCallModuleOpWithNoQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithNoQuantizationMethodAttr);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_ops = main_fn.getOps<TF::XlaCallModuleOp>();
ASSERT_FALSE(xla_call_module_ops.empty());
const absl::StatusOr<Method> method =
GetQuantizationMethod(*xla_call_module_ops.begin());
EXPECT_THAT(
method,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Attribute _quantization_method is not found")));
}
TEST_F(LiftAsFunctionCallTest,
GetQuantizationMethodFailsWhenMalformedQuantizationMethodAttr) {
constexpr absl::string_view kXlaCallModuleOpWithNoQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _quantization_method = "invalid_field: 123", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithNoQuantizationMethodAttr);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_ops = main_fn.getOps<TF::XlaCallModuleOp>();
ASSERT_FALSE(xla_call_module_ops.empty());
const absl::StatusOr<Method> method =
GetQuantizationMethod(*xla_call_module_ops.begin());
EXPECT_THAT(method,
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Failed to parse Method from textproto")));
}
constexpr absl::string_view kFunctionWithRegion =
R"mlir(
func.func @main(%arg0: tensor<i1>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> {
%if = "stablehlo.if"(%arg0) ({
%0 = stablehlo.add %arg1, %arg1 : tensor<f32>
stablehlo.return %0 : tensor<f32>
}, {
%1 = stablehlo.add %arg2, %arg2 : tensor<f32>
stablehlo.return %1 : tensor<f32>
}) : (tensor<i1>) -> (tensor<f32>)
%subtract = stablehlo.subtract %if, %if : tensor<f32>
return %subtract : tensor<f32>
}
)mlir";
TEST_F(LiftAsFunctionCallTest, IsInRegionSucceedsWhenOpInsideRegion) {
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kFunctionWithRegion);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto if_op = FindOperationOfType<mlir::stablehlo::IfOp>(main_fn);
Block& block = if_op->getRegion(0).front();
Operation& add_op = *absl::c_find_if(block, [](Operation& entry) {
return dyn_cast_or_null<::mlir::stablehlo::AddOp>(&entry);
});
EXPECT_TRUE(IsInStableHloOpRegion(&add_op));
}
TEST_F(LiftAsFunctionCallTest, IsInRegionFailsWhenOpNotInsideRegion) {
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kFunctionWithRegion);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto subtract_op = FindOperationOfType<mlir::stablehlo::SubtractOp>(main_fn);
EXPECT_FALSE(IsInStableHloOpRegion(subtract_op));
}
TEST_F(LiftAsFunctionCallTest,
GetQuantizationMethodOrDefaultReturnsCorrectMethod) {
constexpr absl::string_view kXlaCallModuleOpWithQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}>
{
_entry_function = @composite_dot_general_fn_1,
_quantization_method = "no_quantization { }",
_stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}
} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithQuantizationMethodAttr);
ASSERT_TRUE(module_op);
FailureOr<TF::XlaCallModuleOp> xla_call_module_op =
FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op);
ASSERT_TRUE(succeeded(xla_call_module_op));
const Method method = GetQuantizationMethodOrDefault(*xla_call_module_op);
EXPECT_TRUE(method.has_no_quantization());
}
TEST_F(
LiftAsFunctionCallTest,
GetQuantizationMethodOrDefaultReturnsDefaultWhenNoQuantizationMethodAttr) {
constexpr absl::string_view kXlaCallModuleOpWithoutQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}>
{
_entry_function = @composite_dot_general_fn_1,
_stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}
} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithoutQuantizationMethodAttr);
ASSERT_TRUE(module_op);
FailureOr<TF::XlaCallModuleOp> xla_call_module_op =
FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op);
ASSERT_TRUE(succeeded(xla_call_module_op));
const Method method = GetQuantizationMethodOrDefault(*xla_call_module_op);
EXPECT_TRUE(MessageDifferencer::Equals(method, Method::default_instance()));
}
constexpr absl::string_view kModuleDotWeightOnlyPtq = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
return %0 : tensor<?x2xf32>
}
}
)mlir";
TEST_F(LiftAsFunctionCallTest, HasWeightOnlyPtqMethodExists) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotWeightOnlyPtq);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_TRUE(HasWeightOnlyPtqMethod(call_op));
}
TEST_F(LiftAsFunctionCallTest, HasWeightOnlyPtqMethodDifferentMethod) {
const absl::string_view kModuleDotNoQuantization = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "no_quantization { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
return %0 : tensor<?x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotNoQuantization);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_FALSE(HasWeightOnlyPtqMethod(call_op));
}
TEST_F(LiftAsFunctionCallTest, HasWeightOnlyPtqMethodNoMethod) {
const absl::string_view kModuleXlaCallModule = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_1, _original_entry_function = "composite_fn_1", _tfl_quant_trait = "fully_quantizable"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
return %arg0 : tensor<?x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_FALSE(HasWeightOnlyPtqMethod(call_op));
}
TEST_F(LiftAsFunctionCallTest, IsWeightOnlyQuantizableOpDot) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotWeightOnlyPtq);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_TRUE(IsWeightOnlyQuantizableOp(*call_op));
}
TEST_F(LiftAsFunctionCallTest, IsWeightOnlyQuantizableOpNotTfXlaCallModuleOp) {
const absl::string_view kModulePartitionedCallDot = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.PartitionedCall"(%arg0, %1, %0) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_dot_general_fn_1, _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
return %0 : tensor<?x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModulePartitionedCallDot);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::PartitionedCallOp>().begin();
EXPECT_FALSE(IsWeightOnlyQuantizableOp(*call_op));
}
TEST_F(LiftAsFunctionCallTest, IsWeightOnlyQuantizableOpNoConvNoDot) {
constexpr absl::string_view kModuleXlaCallModule = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_1, _original_entry_function = "composite_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
return %arg0 : tensor<?x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_FALSE(IsWeightOnlyQuantizableOp(*call_op));
}
TEST_F(LiftAsFunctionCallTest, GetSortedFunctions) {
constexpr absl::string_view kModuleXlaCallModule = R"mlir(
module {
func.func @conv_3_fn(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
%0 = stablehlo.constant dense<2.000000e+00> : tensor<3x3x4x4xf32>
%1 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
%2 = stablehlo.convolution(%1, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
func.return %2: tensor<1x3x3x4xf32>
}
func.func @conv_1_fn(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
%0 = stablehlo.constant dense<2.000000e+00> : tensor<3x3x4x4xf32>
%1 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
%2 = stablehlo.convolution(%1, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
func.return %2: tensor<1x3x3x4xf32>
}
func.func @conv_2_fn(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
%0 = stablehlo.constant dense<2.000000e+00> : tensor<3x3x4x4xf32>
%1 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
%2 = stablehlo.convolution(%1, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
func.return %2: tensor<1x3x3x4xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
SmallVector<func::FuncOp> funcs = GetSortedFunctions(*module_op);
ASSERT_THAT(funcs, SizeIs(3));
EXPECT_THAT(funcs[0].getSymName(), StrEq("conv_1_fn"));
EXPECT_THAT(funcs[1].getSymName(), StrEq("conv_2_fn"));
EXPECT_THAT(funcs[2].getSymName(), StrEq("conv_3_fn"));
}
}
} |
1,202 | cpp | tensorflow/tensorflow | quantization_driver | tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc | tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_COMMON_QUANTIZATION_LIB_QUANTIZATION_DRIVER_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_COMMON_QUANTIZATION_LIB_QUANTIZATION_DRIVER_H_
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
namespace mlir {
namespace quant {
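// Quantization state of a value: the quantized element type assigned so far
// (null while undecided) and whether the state is immutable because the value
// is already quantized in the IR.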
struct QuantState {
QuantizedType params;
const bool immutable;
bool IsEmpty() const { return params == nullptr; }
};
struct RequantizeState {
enum RequantizePosition {
NO_REQUANTIZE,
ON_INPUT,
ON_OUTPUT
} pos = NO_REQUANTIZE;
QuantizedType params;
SmallVector<std::pair<Operation*, int>> users;
};
using RequantizeStates = SmallVector<RequantizeState>;
class QuantizationDriver {
public:
using QuantStateIndex = int;
using OpWithOperandIndex = std::pair<Operation*, int>;
using OpWithResultIndex = std::pair<Operation*, int>;
explicit QuantizationDriver(func::FuncOp func_op, const bool is_signed,
const int bit_width,
const bool disable_per_channel,
OpQuantSpecGetter op_quant_spec_getter,
OpQuantScaleSpecGetter op_quant_scale_spec_getter,
const bool infer_tensor_range,
const bool legacy_float_scale = false,
const bool is_qdq_conversion = false)
: fn_(func_op),
builder_(func_op.getBody()),
is_signed_(is_signed),
bit_width_(bit_width),
disable_per_channel_(disable_per_channel),
op_quant_spec_getter_(op_quant_spec_getter),
op_quant_scale_spec_getter_(op_quant_scale_spec_getter),
infer_tensor_range_(infer_tensor_range),
legacy_float_scale_(legacy_float_scale),
is_qdq_conversion_(is_qdq_conversion) {}
void Run();
void Initialize();
bool PropagateParamsAndReturnIfChanged();
void Finalize();
SmallVector<BlockArgument, 4> GetArgs() { return args_; }
llvm::DenseMap<std::pair<mlir::Operation*, int>, int> GetResultStates() {
return result_states_;
}
DenseMap<OpWithResultIndex, QuantStateIndex> result_states_;
QuantState& GetArgQuantState(BlockArgument arg) {
return states_[arg_states_[arg]];
}
QuantState& GetResultQuantState(Operation* op, const int index) {
return states_[result_states_[{op, index}]];
}
private:
arith::ConstantOp DuplicateConstantOpIfNeeded(arith::ConstantOp op,
Operation* target_op,
int operand_index);
bool SetBiasParamsWithAdjustments(Operation* op, int bias_index,
ArrayRef<int> input_indices,
QuantizedType params);
bool ShouldCheckBiasScale(Operation* op, int bias_index,
ArrayRef<int> input_indices,
QuantizedType quantized_type, int& input_index,
int& filter_index);
void PreprocessConstantOps();
void SetupAllStates();
bool IsWeight(Operation* cst) { return llvm::is_contained(weights_, cst); }
std::unique_ptr<OpQuantSpec> GetQuantSpec(Operation* op);
std::unique_ptr<OpQuantScaleSpec> GetQuantScaleSpec(Operation* op);
bool IsQuantized(Operation* op);
void AddUserToList(Operation* op, const int index) {
for (Operation* user : op->getResult(index).getUsers()) {
work_list_.push_back(user);
}
}
void AddOperandToList(Operation* op, const int index) {
if (Operation* operand_op = op->getOperand(index).getDefiningOp();
operand_op != nullptr) {
work_list_.push_back(operand_op);
}
}
QuantizedType GetBiasParams(Operation* op, int bias_index,
ArrayRef<int> non_bias_operand_indices,
AccumulatorScaleFunc func);
bool SetResultParams(Operation* op, int result_index,
QuantizedType quantized_type);
bool SetOperandParams(Operation* op, int operand_index,
QuantizedType quantized_type, bool override = false);
bool SetConstantResultParams(Operation* op);
void QuantizeOpResult(Operation* op, int result_index,
QuantizedType quantized_type);
void QuantizeArg(BlockArgument arg, QuantizedType quantized_type);
void QuantizeValue(Value value, QuantizedType quantized_type, Location loc);
void RequantizeOpResult(Operation* op, int result_index,
RequantizeStates& states);
void RequantizeArg(BlockArgument arg, RequantizeStates& states);
void RequantizeValue(Value value, RequantizeStates& states, Location loc);
QuantizedType GetQuantParamsForSameScaleConstraint(Operation* op);
QuantState& GetOperandQuantState(Operation* op, const int index) {
return states_[operand_states_[{op, index}]];
}
RequantizeStates& GetOperandRequantizeStates(Operation* op, const int index) {
return rescale_states_[operand_states_[{op, index}]];
}
RequantizeStates& GetResultRequantizeStates(Operation* op, const int index) {
return rescale_states_[result_states_[{op, index}]];
}
RequantizeStates& GetArgRequantizeStates(BlockArgument arg) {
return rescale_states_[arg_states_[arg]];
}
void InitializeArgState(BlockArgument arg, Value arg_value);
void InitializeOperandState(Operation* op, int index, Value value);
void InitializeResultState(Operation* op, int index, Value value);
func::FuncOp fn_;
OpBuilder builder_;
const bool is_signed_;
const int bit_width_;
const bool disable_per_channel_;
DenseSet<Operation*> weights_;
DenseMap<Operation*, int> optimized_weights_;
std::vector<Operation*> work_list_;
absl::flat_hash_set<Operation*> quantized_;
std::vector<QuantState> states_;
absl::flat_hash_map<QuantStateIndex, RequantizeStates> rescale_states_;
DenseMap<OpWithOperandIndex, QuantStateIndex> operand_states_;
DenseMap<BlockArgument, QuantStateIndex> arg_states_;
DenseMap<Value, QuantStateIndex> value_to_state_;
SmallVector<BlockArgument, 4> args_;
OpQuantSpecGetter op_quant_spec_getter_;
OpQuantScaleSpecGetter op_quant_scale_spec_getter_;
const bool infer_tensor_range_;
const bool legacy_float_scale_;
const bool is_qdq_conversion_;
};
void ApplyQuantizationParamsPropagation(func::FuncOp func, bool is_signed,
int bit_width, bool disable_per_channel,
OpQuantSpecGetter op_quant_spec_getter,
bool infer_tensor_ranges,
bool legacy_float_scale,
bool is_qdq_conversion);
void ApplyQuantizationParamsPropagation(
func::FuncOp func, bool is_signed, int bit_width, bool disable_per_channel,
OpQuantSpecGetter op_quant_spec_getter,
OpQuantScaleSpecGetter op_quant_scale_spec_getter, bool infer_tensor_ranges,
bool legacy_float_scale, bool is_qdq_conversion);
}
}
#endif
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h"
#include <cmath>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_traits.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
namespace mlir {
namespace quant {
namespace {
constexpr int32_t kBiasMax = std::numeric_limits<int32_t>::max() / 2;
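// Creates, or reuses, the quantization state for `value` and records its
// index in `operand_states` or `result_states`, so that a value shared by
// several ops maps to a single state entry.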
void InitializeStateForValue(
Operation* op, const int index, const Value value, const bool as_result,
std::vector<QuantState>& states,
DenseMap<Value, QuantizationDriver::QuantStateIndex>& value_to_state,
DenseMap<QuantizationDriver::OpWithOperandIndex,
QuantizationDriver::QuantStateIndex>& operand_states,
DenseMap<QuantizationDriver::OpWithResultIndex,
QuantizationDriver::QuantStateIndex>& result_states) {
const auto [cached, inserted] = value_to_state.try_emplace(value, 0);
if (!inserted) {
if (as_result) {
result_states[{op, index}] = cached->second;
} else {
operand_states[{op, index}] = cached->second;
}
return;
}
const QuantizedType quantized_type =
QuantizedType::getQuantizedElementType(value.getType());
const bool immutable = quantized_type != nullptr;
const QuantizationDriver::QuantStateIndex next_state_index = states.size();
states.push_back({quantized_type, immutable});
if (as_result) {
result_states[{op, index}] = next_state_index;
} else {
operand_states[{op, index}] = next_state_index;
}
cached->second = next_state_index;
}
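// Returns true if any operand of `op` comes from a DequantizeCast whose
// source tensor carries a per-axis (per-channel) quantized element type.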
bool HasPerAxisQuantizedOperand(Operation* op) {
for (int i = 0; i < op->getNumOperands(); ++i) {
if (auto dq_op = dyn_cast_or_null<quantfork::DequantizeCastOp>(
op->getOperand(i).getDefiningOp())) {
auto type =
mlir::cast<TensorType>(dq_op.getArg().getType()).getElementType();
if (auto per_axis_qtype =
mlir::dyn_cast_or_null<quant::UniformQuantizedPerAxisType>(
QuantizedType::getQuantizedElementType(type))) {
return true;
}
}
}
return false;
}
}
void QuantizationDriver::InitializeArgState(const BlockArgument arg,
const Value arg_value) {
const auto [cached, inserted] = value_to_state_.try_emplace(arg_value, 0);
if (!inserted) {
arg_states_[arg] = cached->second;
return;
}
const QuantizedType quantized_type =
QuantizedType::getQuantizedElementType(arg_value.getType());
const bool immutable = quantized_type != nullptr;
const QuantizationDriver::QuantStateIndex next_state_index = states_.size();
states_.push_back({quantized_type, immutable});
arg_states_[arg] = next_state_index;
cached->second = next_state_index;
}
void QuantizationDriver::InitializeOperandState(Operation* op, const int index,
const Value value) {
InitializeStateForValue(op, index, value, false, states_,
value_to_state_, operand_states_, result_states_);
}
void QuantizationDriver::InitializeResultState(Operation* op, const int index,
const Value value) {
InitializeStateForValue(op, index, value, true, states_,
value_to_state_, operand_states_, result_states_);
}
std::unique_ptr<OpQuantSpec> QuantizationDriver::GetQuantSpec(Operation* op) {
return op_quant_spec_getter_(op);
}
std::unique_ptr<OpQuantScaleSpec> QuantizationDriver::GetQuantScaleSpec(
Operation* op) {
return op_quant_scale_spec_getter_(op);
}
bool QuantizationDriver::IsQuantized(Operation* op) {
for (int i = 0; i < op->getNumResults(); ++i) {
if (GetResultQuantState(op, i).IsEmpty()) return false;
}
return true;
}
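// Derives quantization parameters for a constant result. Signed weights
// tracked in `optimized_weights_` with a valid channel index get a per-axis
// type (unless per-channel quantization is disabled); otherwise a per-tensor
// type is derived. Returns false if the op is not a float constant or no
// quantized type could be produced.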
bool QuantizationDriver::SetConstantResultParams(Operation* op) {
DenseFPElementsAttr attr;
const Value result = op->getResult(0);
if (!matchPattern(result, m_Constant(&attr))) {
return false;
}
Type final_type;
const auto it = optimized_weights_.find(op);
const bool is_weight = it != optimized_weights_.end();
const bool is_weight_with_per_channel_support =
is_weight && it->second != -1 && is_signed_;
if (is_weight_with_per_channel_support && !disable_per_channel_) {
final_type = GetUniformQuantizedPerAxisTypeForWeight(
attr, it->second, true, 8, is_signed_,
true, legacy_float_scale_);
} else {
final_type = GetUniformQuantizedTypeForWeight(
attr, is_weight && is_signed_,
8, is_signed_,
is_weight, legacy_float_scale_);
}
if (const auto quant_type = mlir::dyn_cast_or_null<QuantizedType>(final_type);
quant_type != nullptr) {
return SetResultParams(op, 0, quant_type);
}
return false;
}
bool QuantizationDriver::SetResultParams(Operation* op, const int result_index,
const QuantizedType quantized_type) {
QuantState& state = GetResultQuantState(op, result_index);
if (state.params == quantized_type) {
return false;
}
if (!state.IsEmpty()) {
RequantizeStates& rescales = GetResultRequantizeStates(op, result_index);
RequantizeState& rescale = rescales.emplace_back();
rescale.pos = RequantizeState::ON_INPUT;
rescale.params = quantized_type;
return true;
}
state.params = quantized_type;
AddUserToList(op, result_index);
return true;
}
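// Computes the quantized type for a bias operand from the states of the
// non-bias operands via the accumulator scale function `func`. An already
// populated bias state is returned as-is. When a non-None bias is present,
// the quantization dimension is its trailing axis (0 for rank <= 1 biases);
// otherwise it is left as -1.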
QuantizedType QuantizationDriver::GetBiasParams(
Operation* op, const int bias_index,
const ArrayRef<int> non_bias_operand_indices,
const AccumulatorScaleFunc func) {
QuantState& bias_state = GetOperandQuantState(op, bias_index);
if (!bias_state.IsEmpty()) {
return bias_state.params;
}
std::vector<QuantizedType> op_types{};
op_types.reserve(non_bias_operand_indices.size());
int adjusted_quant_dim = -1;
if (op->getNumOperands() > bias_index) {
Operation* bias_op = op->getOperand(bias_index).getDefiningOp();
if (bias_op != nullptr) {
Type bias_type = bias_op->getResult(0).getType();
if (bias_type != builder_.getNoneType()) {
const int bias_rank = mlir::dyn_cast<ShapedType>(bias_type).getRank();
adjusted_quant_dim = bias_rank > 1 ? bias_rank - 1 : 0;
}
}
}
for (const int non_bias_operand_index : non_bias_operand_indices) {
const QuantState& non_bias_state =
GetOperandQuantState(op, non_bias_operand_index);
op_types.push_back(non_bias_state.params);
}
return func(op_types, adjusted_quant_dim, legacy_float_scale_);
}
bool QuantizationDriver::SetOperandParams(Operation* op,
const int operand_index,
const QuantizedType quantized_type,
const bool override) {
QuantState& state = GetOperandQuantState(op, operand_index);
if (state.params == quantized_type) {
return false;
}
if (!state.IsEmpty() && !override) {
RequantizeStates& rescales = GetOperandRequantizeStates(op, operand_index);
for (RequantizeState& rescale : rescales) {
if (rescale.params == quantized_type) {
rescale.users.emplace_back(op, operand_index);
return true;
}
}
RequantizeState& rescale = rescales.emplace_back();
rescale.pos = RequantizeState::ON_OUTPUT;
rescale.params = quantized_type;
rescale.users.emplace_back(op, operand_index);
return true;
}
state.params = quantized_type;
AddOperandToList(op, operand_index);
return true;
}
void QuantizationDriver::QuantizeOpResult(Operation* op, const int result_index,
const QuantizedType quantized_type) {
builder_.setInsertionPointAfter(op);
const Value original_result = op->getResult(result_index);
QuantizeValue(original_result, quantized_type, op->getLoc());
}
void QuantizationDriver::QuantizeArg(BlockArgument arg,
const QuantizedType quantized_type) {
builder_.setInsertionPointToStart(arg.getOwner());
QuantizeValue(arg, quantized_type, builder_.getUnknownLoc());
}
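// Inserts a QuantizeCastOp/DequantizeCastOp pair after `value` and reroutes
// all existing uses through the dequantized result. The quantize op is tagged
// with the volatile op attribute to mark it as driver-inserted.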
void QuantizationDriver::QuantizeValue(Value value,
QuantizedType quantized_type,
const Location loc) {
const Type expressed_type = value.getType();
const Type new_value_type =
quantized_type.castFromExpressedType(expressed_type);
if (new_value_type == nullptr) return;
auto quantize =
builder_.create<quantfork::QuantizeCastOp>(loc, new_value_type, value);
auto dequantize = builder_.create<quantfork::DequantizeCastOp>(
loc, expressed_type, quantize.getResult());
quantize->setAttr(kVolatileOpAttrName, builder_.getUnitAttr());
value.replaceAllUsesWith(dequantize);
quantize.getOperation()->replaceUsesOfWith(dequantize, value);
}
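// Emits requantization for `op`'s result at `result_index`. All pending
// requantize states must agree on their position (ON_INPUT vs. ON_OUTPUT);
// otherwise nothing is rewritten. For ON_OUTPUT, the insertion point is moved
// past an existing QuantizeCastOp user so the new cast lands after it.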
void QuantizationDriver::RequantizeOpResult(Operation* op,
const int result_index,
RequantizeStates& states) {
if (states.empty()) return;
builder_.setInsertionPointAfter(op);
Value value = op->getResult(result_index);
RequantizeState::RequantizePosition pos = states.front().pos;
if (pos == RequantizeState::NO_REQUANTIZE) {
return;
}
for (const RequantizeState& state : states) {
if (state.pos != pos) {
return;
}
}
if (pos == RequantizeState::ON_OUTPUT) {
Operation* user = value.getUses().begin().getUser();
if (isa<quantfork::QuantizeCastOp>(user)) {
value = user->getResult(0);
builder_.setInsertionPointAfter(user);
}
}
RequantizeValue(value, states, op->getLoc());
}
void QuantizationDriver::RequantizeArg(const BlockArgument arg,
RequantizeStates& states) {
Value value = arg;
builder_.setInsertionPointToStart(arg.getOwner());
if (value.hasOneUse()) {
Operation* user = value.use_begin().getUser();
if (auto q = dyn_cast<quantfork::QuantizeCastOp>(user)) {
value = q.getResult();
builder_.setInsertionPoint(arg.getOwner(), ++Block::iterator(user));
}
}
RequantizeValue(value, states, builder_.getUnknownLoc());
}
void QuantizationDriver::RequantizeValue(Value value, RequantizeStates& states,
const Location loc) {
if (states.empty() || states.front().pos == RequantizeState::NO_REQUANTIZE) {
return;
}
if (states.front().pos == RequantizeState::ON_INPUT) {
RequantizeState& state = states.front();
const Type expressed_type = value.getType();
const Type new_type = state.params.castFromExpressedType(expressed_type);
if (!new_type) return;
auto requantize_op =
builder_.create<quantfork::QuantizeCastOp>(loc, new_type, value);
value.replaceAllUsesWith(requantize_op);
requantize_op.getOperation()->replaceUsesOfWith(requantize_op, value);
return;
}
if (!value.hasOneUse()) {
return;
}
auto dequant_op = dyn_cast_or_null<quantfork::DequantizeCastOp>(
value.use_begin().getUser());
if (!dequant_op) {
return;
}
const int num_uses = std::distance(dequant_op.getResult().use_begin(),
dequant_op.getResult().use_end()); | #include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "llvm/ADT/DenseMap.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir::quant {
namespace {
using ApplyQuantizationParamsPropagationTest = QuantizationTestBase;
using ::testing::IsEmpty;
using ::testing::Not;
constexpr absl::string_view kModuleTFLite = R"mlir(
module {
func.func @main(%arg0: tensor<1x4x4x3xf32>) -> tensor<1x4x4x3xf32> attributes {_from_xla_call_module} {
%cst_0 = arith.constant dense<1.0> : tensor<3x1x1x3xf32>
%cst_1 = arith.constant dense<2.0> : tensor<3xf32>
%0 = "tf.XlaCallModule"(%arg0, %cst_0, %cst_1) <{Sout = [#tf_type.shape<1x4x4x3>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_1, _original_entry_function = "composite_fn_1", _tfl_quant_trait = "fully_quantizable"} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
%1 = "tf.XlaCallModule"(%0, %cst_0, %cst_1) <{Sout = [#tf_type.shape<1x4x4x3>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_2, _original_entry_function = "composite_fn_2", _tfl_quant_trait = "fully_quantizable"} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
return %1 : tensor<1x4x4x3xf32>
}
func.func private @composite_fn_1(%arg0: tensor<1x4x4x3xf32>, %arg1: tensor<3x1x1x3xf32>, %arg2: tensor<3xf32>) -> tensor<1x4x4x3xf32> attributes {tf_quant.composite_function} {
%0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
return %0 : tensor<1x4x4x3xf32>
}
func.func private @composite_fn_2(%arg0: tensor<1x4x4x3xf32>, %arg1: tensor<3x1x1x3xf32>, %arg2: tensor<3xf32>) -> tensor<1x4x4x3xf32> attributes {tf_quant.composite_function} {
%0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
return %0 : tensor<1x4x4x3xf32>
}
}
)mlir";
std::unique_ptr<quant::OpQuantSpec> GetOpQuantSpec(
const mlir::Operation* op,
bool disable_per_channel_for_dense_layers = false) {
auto spec = std::make_unique<quant::OpQuantSpec>();
spec->coeff_op_quant_dim[1] = 3;
spec->biases_params[2] = {{0, 1}, quant::GetUniformQuantizedTypeForBias};
for (const auto& [key, value] : spec->coeff_op_quant_dim) {
spec->quantizable_operands.insert(key);
}
return spec;
}
TEST_F(ApplyQuantizationParamsPropagationTest,
ConstsUsedMultipleTimesAreDuplicated) {
const OwningOpRef<ModuleOp> module_op_ref =
ParseModuleOpString(kModuleTFLite);
func::FuncOp main_fn = FindMainFuncOp(*module_op_ref);
auto op_quant_spec_getter = [&](Operation* op) {
return GetOpQuantSpec(op, false);
};
QuantizationDriver quantization_driver(
main_fn, true, 8,
false, op_quant_spec_getter,
GetDefaultQuantScaleSpec,
true, false,
false);
quantization_driver.Initialize();
int64_t num_constant_op = 0;
main_fn.walk([&](arith::ConstantOp cst) { ++num_constant_op; });
EXPECT_EQ(num_constant_op, 4);
}
TEST_F(ApplyQuantizationParamsPropagationTest,
PropagateParamsCreatesQuantState) {
const OwningOpRef<ModuleOp> module_op_ref =
ParseModuleOpString(kModuleTFLite);
func::FuncOp main_fn = FindMainFuncOp(*module_op_ref);
auto op_quant_spec_getter = [&](Operation* op) {
return GetOpQuantSpec(op, false);
};
QuantizationDriver quantization_driver(
main_fn, true, 8,
false, op_quant_spec_getter,
GetDefaultQuantScaleSpec,
true, false,
false);
quantization_driver.Initialize();
ASSERT_TRUE(quantization_driver.PropagateParamsAndReturnIfChanged());
EXPECT_THAT(quantization_driver.GetArgs(), Not(IsEmpty()));
for (const auto& arg : quantization_driver.GetArgs()) {
const QuantState& state = quantization_driver.GetArgQuantState(arg);
EXPECT_TRUE(isa<quant::QuantizedType>(state.params));
}
for (const auto& result : quantization_driver.GetResultStates()) {
Operation* op = result.first.first;
const int res_index = result.first.second;
const QuantState state =
quantization_driver.GetResultQuantState(op, res_index);
EXPECT_TRUE(isa<quant::QuantizedType>(state.params));
}
}
TEST_F(ApplyQuantizationParamsPropagationTest, FinalizeInsertsQDQOps) {
const OwningOpRef<ModuleOp> module_op_ref =
ParseModuleOpString(kModuleTFLite);
func::FuncOp main_fn = FindMainFuncOp(*module_op_ref);
auto op_quant_spec_getter = [&](Operation* op) {
return GetOpQuantSpec(op, false);
};
ApplyQuantizationParamsPropagation(
main_fn, true, 8,
false, op_quant_spec_getter,
true, false,
false);
Operation* xla_call_module_op =
FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
Operation* filter_dcast_op =
xla_call_module_op->getOperand(1).getDefiningOp();
Operation* filter_qcast_op = filter_dcast_op->getOperand(0).getDefiningOp();
ASSERT_NE(filter_qcast_op, nullptr);
EXPECT_TRUE(isa<quantfork::QuantizeCastOp>(filter_qcast_op));
EXPECT_TRUE(isa<quantfork::DequantizeCastOp>(filter_dcast_op));
EXPECT_TRUE(isa<UniformQuantizedPerAxisType>(
mlir::cast<TensorType>(filter_qcast_op->getResult(0).getType())
.getElementType()));
}
}
} |
1,203 | cpp | tensorflow/tensorflow | math_utils | tensorflow/core/kernels/uniform_quant_ops/math_utils.cc | tensorflow/core/kernels/uniform_quant_ops/math_utils_test.cc | #ifndef TENSORFLOW_TSL_PROFILER_UTILS_MATH_UTILS_H_
#define TENSORFLOW_TSL_PROFILER_UTILS_MATH_UTILS_H_
#include <cstdint>
namespace tsl {
namespace profiler {
inline double PicoToNano(uint64_t p) { return p / 1E3; }
inline double PicoToMicro(uint64_t p) { return p / 1E6; }
inline double PicoToMilli(uint64_t p) { return p / 1E9; }
inline double PicoToUni(uint64_t p) { return p / 1E12; }
inline uint64_t NanoToPico(uint64_t n) { return n * 1000; }
inline double NanoToMicro(uint64_t n) { return n / 1E3; }
inline double NanoToMilli(uint64_t n) { return n / 1E6; }
inline double MicroToNano(double u) { return u * 1E3; }
inline double MicroToMilli(double u) { return u / 1E3; }
inline uint64_t MilliToPico(double m) { return m * 1E9; }
inline uint64_t MilliToNano(double m) { return m * 1E6; }
inline double MilliToUni(double m) { return m / 1E3; }
inline uint64_t UniToPico(double uni) { return uni * 1E12; }
inline uint64_t UniToNano(double uni) { return uni * 1E9; }
inline double UniToMicro(double uni) { return uni * 1E6; }
inline double UniToGiga(double uni) { return uni / 1E9; }
inline double GigaToUni(double giga) { return giga * 1E9; }
inline double GigaToTera(double giga) { return giga / 1E3; }
inline double TeraToGiga(double tera) { return tera * 1E3; }
inline double CyclesToSeconds(double cycles, double frequency_hz) {
return cycles / frequency_hz;
}
inline double SafeDivide(double dividend, double divisor) {
constexpr double kEpsilon = 1.0E-10;
if ((-kEpsilon < divisor) && (divisor < kEpsilon)) return 0.0;
return dividend / divisor;
}
inline double GibiToGiga(double gibi) { return gibi * ((1 << 30) / 1.0e9); }
inline double GigaToGibi(double giga) { return giga / ((1 << 30) / 1.0e9); }
inline double GibibytesPerSecond(double gigabytes, double ns) {
return GigaToGibi(SafeDivide(gigabytes, ns));
}
}
}
#endif
#include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h"
#include <algorithm>
#include <cmath>
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
using errors::InvalidArgument;
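// Decomposes a positive, finite `double_multiplier` into a 31-bit fixed-point
// multiplier and a power-of-two exponent so that
//   double_multiplier ~= quantized_multiplier * 2^(shift - 31).
// Shifts below -31 collapse the result to zero; shifts above 30 saturate to
// the largest representable multiplier. For example (see the unit test
// below), 1.2 maps to quantized_multiplier = 1288490189 with shift = 1.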
Status QuantizeMultiplier(double double_multiplier,
int32_t& quantized_multiplier, int32_t& shift) {
  if (!std::isfinite(double_multiplier) || double_multiplier <= 0) {
    return InvalidArgument(
        "double_multiplier must be a positive finite number. Given ",
double_multiplier);
}
const double q = std::frexp(double_multiplier, &shift);
auto q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
if (q_fixed == (1LL << 31)) {
q_fixed /= 2;
++shift;
}
if (shift < -31) {
shift = 0;
q_fixed = 0;
}
if (shift > 30) {
shift = 30;
q_fixed = (1LL << 31) - 1;
}
quantized_multiplier = static_cast<int32_t>(q_fixed);
return absl::OkStatus();
}
} | #include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h"
#include <limits>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
TEST(MathUtilsTest, AffineQuantize) {
TensorShape shape({2, 2, 2});
Tensor tensor = test::AsTensor<float>(
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 70.0f, 80.0f}, shape);
Tensor quantized_tensor =
test::AsTensor<qint8>({0, 0, 0, 0, 0, 0, 0, 0}, shape);
Eigen::DSizes<Eigen::Index, 3> start_indices{1, 0, 0};
Eigen::DSizes<Eigen::Index, 3> sizes{1, 2, 2};
auto tensor_slice = tensor.tensor<float, 3>().slice(start_indices, sizes);
auto quantized_tensor_slice =
quantized_tensor.tensor<qint8, 3>().slice(start_indices, sizes);
AffineQuantize(tensor_slice, 0.5f, 3,
-128, 40,
quantized_tensor_slice);
Tensor expected_tensor =
test::AsTensor<qint8>({0, 0, 0, 0, 6, 6, 38, 40}, shape);
test::ExpectEqual(quantized_tensor, expected_tensor);
}
TEST(MathUtilsTest, AffineDequantize) {
TensorShape shape({2, 2, 2});
Tensor tensor = test::AsTensor<qint8>({10, 15, 20, 25, -10, -5, 0, 5}, shape);
Tensor dequantized_tensor =
test::AsTensor<float>({0, 0, 0, 0, 0, 0, 0, 0}, shape);
Eigen::DSizes<Eigen::Index, 3> start_indices{1, 0, 0};
Eigen::DSizes<Eigen::Index, 3> sizes{1, 2, 2};
auto tensor_slice = tensor.tensor<qint8, 3>().slice(start_indices, sizes);
auto dequantized_tensor_slice =
dequantized_tensor.tensor<float, 3>().slice(start_indices, sizes);
AffineDequantize(tensor_slice, 2.0f, 3,
dequantized_tensor_slice);
Tensor expected_tensor =
test::AsTensor<float>({0, 0, 0, 0, -26.0, -16.0, -6.0, 4.0}, shape);
test::ExpectTensorNear<float>(dequantized_tensor, expected_tensor, 1e-6);
}
TEST(MathUtilsTest, AsymmetricQuantize) {
float scale;
int32_t zero_point;
TensorShape shape({2, 2});
Tensor quantized_tensor = test::AsTensor<qint8>({0, 0, 0, 0}, shape);
TF_ASSERT_OK(AsymmetricQuantize(
test::AsTensor<float>({5.0f, 6.0f, 7.0f, 8.0f}, shape).tensor<float, 2>(),
-128,
127, scale, zero_point,
quantized_tensor.tensor<qint8, 2>()));
Tensor expected_tensor = test::AsTensor<qint8>({31, 63, 95, 127}, shape);
test::ExpectEqual(quantized_tensor, expected_tensor);
EXPECT_FLOAT_EQ(scale, 0.031372551f);
EXPECT_EQ(zero_point, -128);
}
TEST(MathUtilsTest, AsymmetricQuantizeZeroValuesTensor) {
float scale;
int32_t zero_point;
TensorShape shape({2, 2});
Tensor quantized_tensor = test::AsTensor<qint8>({0, 0, 0, 0}, shape);
TF_ASSERT_OK(AsymmetricQuantize(
test::AsTensor<float>({0.0f, 0.0f, 0.0f, 0.0f}, shape).tensor<float, 2>(),
-128,
127, scale, zero_point,
quantized_tensor.tensor<qint8, 2>()));
Tensor expected_tensor = test::AsTensor<qint8>({0, 0, 0, 0}, shape);
test::ExpectEqual(quantized_tensor, expected_tensor);
EXPECT_FLOAT_EQ(scale, 1.0f);
EXPECT_EQ(zero_point, 0);
}
TEST(MathUtilsTest, QuantizeMultiplierInvalidArgument) {
int32_t quantized_multiplier;
int shift;
EXPECT_TRUE(absl::IsInvalidArgument(
QuantizeMultiplier(0, quantized_multiplier, shift)));
EXPECT_TRUE(absl::IsInvalidArgument(
QuantizeMultiplier(-1, quantized_multiplier, shift)));
EXPECT_TRUE(absl::IsInvalidArgument(QuantizeMultiplier(
std::numeric_limits<double>::infinity(), quantized_multiplier, shift)));
EXPECT_TRUE(absl::IsInvalidArgument(QuantizeMultiplier(
std::numeric_limits<double>::quiet_NaN(), quantized_multiplier, shift)));
}
TEST(MathUtilsTest, QuantizeMultiplierComputesCorrectly) {
int32_t quantized_multiplier;
int shift;
TF_ASSERT_OK(QuantizeMultiplier(1.2, quantized_multiplier, shift));
EXPECT_EQ(quantized_multiplier, 1288490189);
EXPECT_EQ(shift, 1);
TF_ASSERT_OK(QuantizeMultiplier(15.5, quantized_multiplier, shift));
EXPECT_EQ(quantized_multiplier, 2080374784);
EXPECT_EQ(shift, 4);
}
TEST(MathUtilsTest,
QuantizeMultiplierAndAffineRequantizeWithQuantizedMultiplierAndShift) {
int32_t effective_quantized_multiplier;
int effective_shift;
TF_ASSERT_OK(
QuantizeMultiplier(1.5, effective_quantized_multiplier, effective_shift));
EXPECT_EQ((AffineRequantizeWithQuantizedMultiplierAndShift<int32_t, int32_t>(
-9, effective_quantized_multiplier, effective_shift,
2, 1,
-128, 127)),
-15);
EXPECT_EQ((AffineRequantizeWithQuantizedMultiplierAndShift<int32_t, int32_t>(
2, effective_quantized_multiplier, effective_shift,
2, 1,
-128, 127)),
1);
EXPECT_EQ((AffineRequantizeWithQuantizedMultiplierAndShift<int32_t, int32_t>(
31, effective_quantized_multiplier, effective_shift,
2, 1,
-128, 127)),
45);
EXPECT_EQ((AffineRequantizeWithQuantizedMultiplierAndShift<int32_t, int32_t>(
42, effective_quantized_multiplier, effective_shift,
2, 1,
-128, 127)),
61);
}
} |
1,204 | cpp | tensorflow/tensorflow | fill_quantization_options | tensorflow/compiler/mlir/quantization/stablehlo/utils/fill_quantization_options.cc | tensorflow/compiler/mlir/quantization/stablehlo/tests/fill_quantization_options_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_UTILS_FILL_QUANTIZATION_OPTIONS_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_UTILS_FILL_QUANTIZATION_OPTIONS_H_
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.pb.h"
namespace mlir::quant::stablehlo {
using ::stablehlo::quantization::QuantizationOptions;
QuantizationOptions FillPresetQuantizationOptions(
QuantizationOptions quantization_options);
LogicalResult GetActivationBitWidth(QuantizationOptions quantization_options,
int* bit_width);
}
#endif
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.pb.h"
namespace mlir::quant::stablehlo {
using ::stablehlo::quantization::CustomQuantizationMethod;
using ::stablehlo::quantization::PresetQuantizationMethod;
using ::stablehlo::quantization::QuantizationComponentSpec;
using ::stablehlo::quantization::QuantizationOptions;
using QuantizationComponent =
::stablehlo::quantization::QuantizationComponentSpec_QuantizationComponent;
using BitType = ::stablehlo::quantization::QuantizationComponentSpec_BitType;
using BitWidth = ::stablehlo::quantization::QuantizationComponentSpec_BitWidth;
void SetQuantizationComponentSpec(QuantizationComponentSpec* spec,
const QuantizationComponent& component,
const BitType bit_type,
const BitWidth bit_width) {
spec->set_quantization_component(component);
spec->set_bit_type(bit_type);
spec->set_bit_width(bit_width);
}
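// Expands a preset quantization method into explicit per-component specs:
// FLOAT16 emits 16-bit float weight and bias components, WEIGHT_ONLY emits an
// 8-bit integer weight component, and static-range PTQ emits 8-bit integer
// activation and weight components plus a 32-bit integer bias component. The
// expanded specs are written back as a custom quantization method.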
::stablehlo::quantization::QuantizationOptions FillPresetQuantizationOptions(
::stablehlo::quantization::QuantizationOptions quantization_options_) {
CustomQuantizationMethod custom_method =
quantization_options_.quantization_method().custom_quantization_method();
QuantizationComponentSpec *activation_component, *weight_component,
*bias_component;
const auto preset_method = quantization_options_.quantization_method()
.preset_quantization_method()
.preset_method();
if (!preset_method) return quantization_options_;
switch (preset_method) {
case PresetQuantizationMethod::FLOAT16:
weight_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(weight_component,
QuantizationComponentSpec::COMPONENT_WEIGHT,
QuantizationComponentSpec::BIT_TYPE_FLOAT,
QuantizationComponentSpec::BIT_WIDTH_16);
bias_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(bias_component,
QuantizationComponentSpec::COMPONENT_BIAS,
QuantizationComponentSpec::BIT_TYPE_FLOAT,
QuantizationComponentSpec::BIT_WIDTH_16);
break;
case PresetQuantizationMethod::WEIGHT_ONLY:
weight_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(weight_component,
QuantizationComponentSpec::COMPONENT_WEIGHT,
QuantizationComponentSpec::BIT_TYPE_INT,
QuantizationComponentSpec::BIT_WIDTH_8);
break;
case PresetQuantizationMethod::POST_TRAINING_QUANTIZATION_STATIC_RANGE:
activation_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(
activation_component, QuantizationComponentSpec::COMPONENT_ACTIVATION,
QuantizationComponentSpec::BIT_TYPE_INT,
QuantizationComponentSpec::BIT_WIDTH_8);
weight_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(weight_component,
QuantizationComponentSpec::COMPONENT_WEIGHT,
QuantizationComponentSpec::BIT_TYPE_INT,
QuantizationComponentSpec::BIT_WIDTH_8);
bias_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(bias_component,
QuantizationComponentSpec::COMPONENT_BIAS,
QuantizationComponentSpec::BIT_TYPE_INT,
QuantizationComponentSpec::BIT_WIDTH_32);
break;
default:
break;
}
*quantization_options_.mutable_quantization_method()
->mutable_custom_quantization_method() = custom_method;
return quantization_options_;
}
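// Looks up the activation component of the custom quantization method and
// writes its bit width (4, 8, 16, or 32) to `bit_width`. Fails when no
// activation component with a supported bit width is present.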
LogicalResult GetActivationBitWidth(QuantizationOptions quantization_options,
int* bit_width) {
CustomQuantizationMethod custom_method =
quantization_options.quantization_method().custom_quantization_method();
for (const auto& component : custom_method.quantization_component_spec()) {
if (component.quantization_component() ==
QuantizationComponentSpec::COMPONENT_ACTIVATION) {
switch (component.bit_width()) {
        case QuantizationComponentSpec::BIT_WIDTH_4:
          *bit_width = 4;
          return success();
        case QuantizationComponentSpec::BIT_WIDTH_8:
          *bit_width = 8;
          return success();
        case QuantizationComponentSpec::BIT_WIDTH_16:
          *bit_width = 16;
          return success();
        case QuantizationComponentSpec::BIT_WIDTH_32:
          *bit_width = 32;
          return success();
        default:
          break;
}
}
}
return failure();
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/fill_quantization_options.h"
#include <ostream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.pb.h"
#include "tsl/platform/protobuf.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::PresetQuantizationMethod;
using ::stablehlo::quantization::QuantizationComponentSpec;
using ::stablehlo::quantization::QuantizationOptions;
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.SerializeAsString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p, testing::MatchResultListener*) const {
return p.SerializeAsString() == expected_;
}
void DescribeTo(::std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(::std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
void FillPresetQuantizationOptionsTestHelper(
const PresetQuantizationMethod::PresetMethod preset_quantization_options,
const QuantizationComponentSpec expected_activation_component,
const QuantizationComponentSpec expected_weight_component,
const QuantizationComponentSpec expected_bias_component) {
QuantizationOptions quantization_options;
quantization_options.mutable_quantization_method()
->mutable_preset_quantization_method()
->set_preset_method(preset_quantization_options);
QuantizationOptions filled_quantization_options =
quant::stablehlo::FillPresetQuantizationOptions(quantization_options);
for (QuantizationComponentSpec component :
filled_quantization_options.quantization_method()
.custom_quantization_method()
.quantization_component_spec()) {
switch (component.quantization_component()) {
case (QuantizationComponentSpec::COMPONENT_ACTIVATION):
EXPECT_THAT(component, EqualsProto(expected_activation_component));
break;
case (QuantizationComponentSpec::COMPONENT_WEIGHT):
EXPECT_THAT(component, EqualsProto(expected_weight_component));
break;
case (QuantizationComponentSpec::COMPONENT_BIAS):
EXPECT_THAT(component, EqualsProto(expected_bias_component));
break;
default:
break;
}
}
}
TEST(FillQuantizationOptionsTest, PresetFloat16) {
QuantizationComponentSpec activation_component, weight_component,
bias_component;
weight_component.set_quantization_component(
QuantizationComponentSpec::COMPONENT_WEIGHT);
weight_component.set_bit_width(QuantizationComponentSpec::BIT_WIDTH_16);
weight_component.set_bit_type(QuantizationComponentSpec::BIT_TYPE_FLOAT);
bias_component.set_quantization_component(
QuantizationComponentSpec::COMPONENT_BIAS);
bias_component.set_bit_width(QuantizationComponentSpec::BIT_WIDTH_16);
bias_component.set_bit_type(QuantizationComponentSpec::BIT_TYPE_FLOAT);
FillPresetQuantizationOptionsTestHelper(
PresetQuantizationMethod::FLOAT16,
activation_component,
weight_component,
bias_component);
}
}
} |
1,205 | cpp | tensorflow/tensorflow | bfloat16_type | tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type.cc | tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_UTILS_BFLOAT16_TYPE_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_UTILS_BFLOAT16_TYPE_H_
#include "mlir/IR/Types.h"
namespace mlir::quant::stablehlo {
bool IsLargeFloatType(Type type);
Type ToBfloat16Type(Type type);
}
#endif
#include "tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
namespace mlir::quant::stablehlo {
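// Returns true if `type` (or its element type, for shaped types) is a float
// type wider than 16 bits, i.e. a candidate for narrowing to bfloat16.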
bool IsLargeFloatType(Type type) {
type = getElementTypeOrSelf(type);
return isa<FloatType>(type) && type.getIntOrFloatBitWidth() > 16;
}
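// Narrows large float types, including the element types of shaped types, to
// bfloat16; every other type is returned unchanged.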
Type ToBfloat16Type(Type type) {
if (auto shaped = mlir::dyn_cast<ShapedType>(type)) {
const Type elem = shaped.getElementType();
if (IsLargeFloatType(elem)) {
return shaped.clone(BFloat16Type::get(type.getContext()));
}
} else if (IsLargeFloatType(type)) {
return BFloat16Type::get(type.getContext());
}
return type;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type.h"
#include <memory>
#include <gtest/gtest.h>
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
namespace mlir::quant::stablehlo {
namespace {
std::unique_ptr<MLIRContext> CreateContext() {
auto context = std::make_unique<MLIRContext>();
DialectRegistry mlir_registry;
RegisterCommonToolingDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
return context;
}
TEST(IsLargeFloatTypeTest, scalars) {
auto context = CreateContext();
EXPECT_FALSE(IsLargeFloatType(Float8E4M3FNType::get(context.get())));
EXPECT_FALSE(IsLargeFloatType(Float16Type::get(context.get())));
EXPECT_FALSE(IsLargeFloatType(BFloat16Type::get(context.get())));
EXPECT_TRUE(IsLargeFloatType(Float32Type::get(context.get())));
EXPECT_TRUE(IsLargeFloatType(Float64Type::get(context.get())));
EXPECT_TRUE(IsLargeFloatType(Float80Type::get(context.get())));
EXPECT_FALSE(IsLargeFloatType(IntegerType::get(context.get(), 8)));
EXPECT_FALSE(IsLargeFloatType(IntegerType::get(context.get(), 16)));
EXPECT_FALSE(IsLargeFloatType(IntegerType::get(context.get(), 32)));
}
TEST(IsLargeFloatTypeTest, tensors) {
auto context = CreateContext();
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float8E4M3FNType::get(context.get()))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float16Type::get(context.get()))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get()))));
EXPECT_TRUE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float32Type::get(context.get()))));
EXPECT_TRUE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float64Type::get(context.get()))));
EXPECT_TRUE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float80Type::get(context.get()))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 8))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 16))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 32))));
}
TEST(ToBfloat16TypeTest, scalars) {
auto context = CreateContext();
EXPECT_EQ(ToBfloat16Type(Float8E4M3FNType::get(context.get())),
Float8E4M3FNType::get(context.get()));
EXPECT_EQ(ToBfloat16Type(Float16Type::get(context.get())),
Float16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(BFloat16Type::get(context.get())),
BFloat16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(Float32Type::get(context.get())),
BFloat16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(Float64Type::get(context.get())),
BFloat16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(Float80Type::get(context.get())),
BFloat16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(IntegerType::get(context.get(), 8)),
IntegerType::get(context.get(), 8));
EXPECT_EQ(ToBfloat16Type(IntegerType::get(context.get(), 16)),
IntegerType::get(context.get(), 16));
EXPECT_EQ(ToBfloat16Type(IntegerType::get(context.get(), 32)),
IntegerType::get(context.get(), 32));
}
TEST(ToBfloat16TypeTest, tensors) {
auto context = CreateContext();
EXPECT_EQ(
ToBfloat16Type(
RankedTensorType::get({2, 2}, Float8E4M3FNType::get(context.get()))),
RankedTensorType::get({2, 2}, Float8E4M3FNType::get(context.get())));
EXPECT_EQ(ToBfloat16Type(
RankedTensorType::get({2, 2}, Float16Type::get(context.get()))),
RankedTensorType::get({2, 2}, Float16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(RankedTensorType::get(
{2, 2}, BFloat16Type::get(context.get()))),
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(
RankedTensorType::get({2, 2}, Float32Type::get(context.get()))),
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(
RankedTensorType::get({2, 2}, Float64Type::get(context.get()))),
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(
RankedTensorType::get({2, 2}, Float80Type::get(context.get()))),
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(RankedTensorType::get(
{2, 2}, IntegerType::get(context.get(), 8))),
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 8)));
EXPECT_EQ(ToBfloat16Type(RankedTensorType::get(
{2, 2}, IntegerType::get(context.get(), 16))),
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 16)));
EXPECT_EQ(ToBfloat16Type(RankedTensorType::get(
{2, 2}, IntegerType::get(context.get(), 32))),
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 32)));
}
}
} |
1,206 | cpp | tensorflow/tensorflow | tf_type_utils | tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.cc | tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_UTILS_TF_TYPE_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_UTILS_TF_TYPE_UTILS_H_
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LogicalResult.h"
namespace mlir::quant::tensorflow {
FailureOr<mlir::DenseElementsAttr> GetDenseAttrFromTensorProtoAttr(
llvm::StringRef mangled_tensor_proto, TensorType result_tensor_type);
bool IsTFQintType(Type type);
Type GetIntTypeFromTFQint(Type type);
bool IsTFUniformQuantizedOp(Operation* op);
}
#endif
#include "tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.h"
#include "absl/status/status.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/mangling_util.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
namespace mlir::quant::tensorflow {
bool IsTFQintType(const Type type) {
return mlir::isa<TF::Qint8Type, TF::Qint16Type, TF::Qint32Type,
TF::Quint8Type, TF::Quint16Type>(type);
}
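// Maps a TF quantized integer type to the corresponding builtin integer type:
// qint8/qint16/qint32 become signless i8/i16/i32, while quint8/quint16 become
// unsigned ui8/ui16. Any other type is returned unchanged.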
Type GetIntTypeFromTFQint(const Type type) {
return TypeSwitch<Type, Type>(type)
.Case<TF::Qint8Type>(
[&type](Type) { return IntegerType::get(type.getContext(), 8); })
.Case<TF::Qint16Type>(
[&type](Type) { return IntegerType::get(type.getContext(), 16); })
.Case<TF::Qint32Type>(
[&type](Type) { return IntegerType::get(type.getContext(), 32); })
.Case<TF::Quint8Type>([&type](Type) {
return IntegerType::get(type.getContext(), 8,
IntegerType::SignednessSemantics::Unsigned);
})
.Case<TF::Quint16Type>([&type](Type) {
return IntegerType::get(type.getContext(), 16,
IntegerType::SignednessSemantics::Unsigned);
})
.Default([&type](Type) { return type; });
}
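// Demangles a serialized TensorProto and converts its payload into a
// DenseElementsAttr. Only DT_QINT8 and DT_QINT32 tensors are supported; their
// values are reinterpreted with i8 and i32 element types respectively. Any
// demangling, parsing, or dtype mismatch yields failure().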
FailureOr<mlir::DenseElementsAttr> GetDenseAttrFromTensorProtoAttr(
const llvm::StringRef mangled_tensor_proto, TensorType tensor_type) {
::tensorflow::TensorProto tensor_proto;
absl::Status status = ::tensorflow::mangling_util::DemangleTensor(
mangled_tensor_proto, &tensor_proto);
if (!status.ok()) {
return failure();
}
::tensorflow::Tensor t;
if (!t.FromProto(tensor_proto)) {
return failure();
}
if (t.dtype() == ::tensorflow::DT_QINT8) {
const auto arr = t.flat<::tensorflow::qint8>();
return mlir::DenseElementsAttr::get(
tensor_type.clone(IntegerType::get(tensor_type.getContext(), 8)),
llvm::ArrayRef(arr.data(), arr.size()));
} else if (t.dtype() == ::tensorflow::DT_QINT32) {
const auto arr = t.flat<::tensorflow::qint32>();
return mlir::DenseElementsAttr::get(
tensor_type.clone(IntegerType::get(tensor_type.getContext(), 32)),
llvm::ArrayRef(arr.data(), arr.size()));
} else {
return failure();
}
}
bool IsTFUniformQuantizedOp(Operation *op) {
return llvm::isa<
TF::UniformDequantizeOp,
TF::UniformQuantizeOp,
TF::UniformQuantizedAddOp,
TF::UniformQuantizedClipByValueOp,
TF::UniformQuantizedConvolutionHybridOp,
TF::UniformQuantizedConvolutionOp,
TF::UniformQuantizedDotHybridOp,
TF::UniformQuantizedDotOp,
TF::UniformRequantizeOp
>(op);
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.h"
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Quant/QuantOps.h"
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/mangling_util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/tsl/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/ir/types/dialect.h"
namespace mlir::quant::tensorflow {
namespace {
std::string GetQint8Tensor() {
::tensorflow::Tensor tensor(::tensorflow::DT_QINT8, {2, 2});
tensor.matrix<tsl::qint8>()(0, 0) = tsl::qint8(1);
tensor.matrix<tsl::qint8>()(0, 1) = tsl::qint8(2);
tensor.matrix<tsl::qint8>()(1, 0) = tsl::qint8(3);
tensor.matrix<tsl::qint8>()(1, 1) = tsl::qint8(4);
::tensorflow::TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
return ::tensorflow::mangling_util::MangleTensor(tensor_proto);
}
std::string GetQint16Tensor() {
::tensorflow::Tensor tensor(::tensorflow::DT_QINT16, {2, 2});
tensor.matrix<tsl::qint16>()(0, 0) = tsl::qint16(1);
tensor.matrix<tsl::qint16>()(0, 1) = tsl::qint16(2);
tensor.matrix<tsl::qint16>()(1, 0) = tsl::qint16(3);
tensor.matrix<tsl::qint16>()(1, 1) = tsl::qint16(4);
::tensorflow::TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
return ::tensorflow::mangling_util::MangleTensor(tensor_proto);
}
std::string GetQint32Tensor() {
::tensorflow::Tensor tensor(::tensorflow::DT_QINT32, {2, 2});
tensor.matrix<tsl::qint32>()(0, 0) = tsl::qint32(1);
tensor.matrix<tsl::qint32>()(0, 1) = tsl::qint32(2);
tensor.matrix<tsl::qint32>()(1, 0) = tsl::qint32(3);
tensor.matrix<tsl::qint32>()(1, 1) = tsl::qint32(4);
::tensorflow::TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
return ::tensorflow::mangling_util::MangleTensor(tensor_proto);
}
std::unique_ptr<MLIRContext> CreateContext() {
auto context = std::make_unique<MLIRContext>();
DialectRegistry mlir_registry;
RegisterCommonToolingDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
context->getOrLoadDialect<tf_type::TFTypeDialect>();
context->getOrLoadDialect<quant::QuantizationDialect>();
context->getOrLoadDialect<mlir::mhlo::MhloDialect>();
context->getOrLoadDialect<sparse_tensor::SparseTensorDialect>();
return context;
}
TEST(GetDenseAttrFromTensorProtoAttrTest, Qint8ToUQ8Succeeds) {
auto context = CreateContext();
TensorType result_tensor_type = RankedTensorType::get(
{2, 2}, quant::UniformQuantizedType::get(
quant::QuantizationFlags::FlagValue::Signed,
IntegerType::get(context.get(), 8),
FloatType::getF32(context.get()), 3.0, 2, -128, 127));
auto dense_attr =
GetDenseAttrFromTensorProtoAttr(GetQint8Tensor(), result_tensor_type);
ASSERT_TRUE(succeeded(dense_attr));
EXPECT_THAT(dense_attr->getValues<int8_t>(), testing::SizeIs(4));
EXPECT_EQ(dense_attr->getValues<int8_t>()[0], 1);
EXPECT_EQ(dense_attr->getValues<int8_t>()[1], 2);
EXPECT_EQ(dense_attr->getValues<int8_t>()[2], 3);
EXPECT_EQ(dense_attr->getValues<int8_t>()[3], 4);
}
TEST(GetDenseAttrFromTensorProtoAttrTest, Qint8ToInt8Succeeds) {
auto context = CreateContext();
TensorType result_tensor_type =
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 8));
auto dense_attr =
GetDenseAttrFromTensorProtoAttr(GetQint8Tensor(), result_tensor_type);
ASSERT_TRUE(succeeded(dense_attr));
EXPECT_THAT(dense_attr->getValues<int8_t>(), testing::SizeIs(4));
EXPECT_EQ(dense_attr->getValues<int8_t>()[0], 1);
EXPECT_EQ(dense_attr->getValues<int8_t>()[1], 2);
EXPECT_EQ(dense_attr->getValues<int8_t>()[2], 3);
EXPECT_EQ(dense_attr->getValues<int8_t>()[3], 4);
}
TEST(GetDenseAttrFromTensorProtoAttrTest, Qint32ToUQ32Succeeds) {
auto context = CreateContext();
TensorType result_tensor_type = RankedTensorType::get(
{2, 2},
quant::UniformQuantizedType::get(
quant::QuantizationFlags::FlagValue::Signed,
IntegerType::get(context.get(), 32), FloatType::getF32(context.get()),
3.0, 2, -2147483648, 2147483647));
auto dense_attr =
GetDenseAttrFromTensorProtoAttr(GetQint32Tensor(), result_tensor_type);
ASSERT_TRUE(succeeded(dense_attr));
EXPECT_THAT(dense_attr->getValues<int32_t>(), testing::SizeIs(4));
EXPECT_EQ(dense_attr->getValues<int32_t>()[0], 1);
EXPECT_EQ(dense_attr->getValues<int32_t>()[1], 2);
EXPECT_EQ(dense_attr->getValues<int32_t>()[2], 3);
EXPECT_EQ(dense_attr->getValues<int32_t>()[3], 4);
}
TEST(GetDenseAttrFromTensorProtoAttrTest, Qint32ToInt32Succeeds) {
auto context = CreateContext();
TensorType result_tensor_type =
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 32));
auto dense_attr =
GetDenseAttrFromTensorProtoAttr(GetQint32Tensor(), result_tensor_type);
ASSERT_TRUE(succeeded(dense_attr));
EXPECT_THAT(dense_attr->getValues<int32_t>(), testing::SizeIs(4));
EXPECT_EQ(dense_attr->getValues<int32_t>()[0], 1);
EXPECT_EQ(dense_attr->getValues<int32_t>()[1], 2);
EXPECT_EQ(dense_attr->getValues<int32_t>()[2], 3);
EXPECT_EQ(dense_attr->getValues<int32_t>()[3], 4);
}
TEST(GetDenseAttrFromTensorProtoAttrTest, UnsupportedQint16Fails) {
auto context = CreateContext();
TensorType result_tensor_type =
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 16));
EXPECT_TRUE(failed(
GetDenseAttrFromTensorProtoAttr(GetQint16Tensor(), result_tensor_type)));
}
TEST(IsTFQintTypeTest, ValidTFQintTypeSucceeds) {
auto context = CreateContext();
EXPECT_TRUE(IsTFQintType(TF::Qint8Type::get(context.get())));
EXPECT_TRUE(IsTFQintType(TF::Qint16Type::get(context.get())));
EXPECT_TRUE(IsTFQintType(TF::Qint32Type::get(context.get())));
EXPECT_TRUE(IsTFQintType(TF::Quint8Type::get(context.get())));
EXPECT_TRUE(IsTFQintType(TF::Quint16Type::get(context.get())));
EXPECT_FALSE(IsTFQintType(TF::Int8RefType::get(context.get())));
EXPECT_FALSE(IsTFQintType(TF::Float8E5M2RefType::get(context.get())));
}
TEST(GetIntTypeFromTFQintTest, ChecksIntTypesFromTFQint) {
auto context = CreateContext();
auto type = GetIntTypeFromTFQint(TF::Qint8Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 8);
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isSigned());
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
type = GetIntTypeFromTFQint(TF::Qint16Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 16);
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isSigned());
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
type = GetIntTypeFromTFQint(TF::Qint32Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 32);
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isSigned());
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
type = GetIntTypeFromTFQint(TF::Quint8Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 8);
EXPECT_TRUE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
type = GetIntTypeFromTFQint(TF::Quint16Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 16);
EXPECT_TRUE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
EXPECT_EQ(GetIntTypeFromTFQint(IntegerType::get(type.getContext(), 32)),
IntegerType::get(type.getContext(), 32));
}
}
} |
1,207 | cpp | tensorflow/tensorflow | save_report | tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.cc | tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_INSTRUMENTATIONS_SAVE_REPORT_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_INSTRUMENTATIONS_SAVE_REPORT_H_
#include <optional>
#include <string>
#include "absl/strings/string_view.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassInstrumentation.h"
namespace mlir::quant::stablehlo {
class SaveQuantizationReportInstrumentation : public PassInstrumentation {
public:
explicit SaveQuantizationReportInstrumentation(
std::optional<absl::string_view> file_path);
void runAfterPass(Pass* pass, Operation* op) override;
private:
std::optional<std::string> file_path_;
};
}
#endif
#include "tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.h"
#include <optional>
#include <string>
#include "absl/base/nullability.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h"
namespace mlir::quant::stablehlo {
namespace {
std::optional<std::string> OptionalStringViewToOptionalString(
std::optional<absl::string_view> view) {
if (view == std::nullopt) return std::nullopt;
return std::make_optional<std::string>(*view);
}
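// Matches the pass/op pair this instrumentation targets: the
// "stablehlo-quantize-composite-functions" pass running on a ModuleOp.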
bool IsQuantizeCompositeFunctionPass(absl::Nullable<Pass*> pass,
absl::Nullable<Operation*> op) {
return pass != nullptr &&
pass->getArgument() == "stablehlo-quantize-composite-functions" &&
isa_and_nonnull<ModuleOp>(op);
}
bool ShouldSaveReport(absl::Nullable<Pass*> pass, absl::Nullable<Operation*> op,
const std::optional<std::string>& file_path) {
return file_path != std::nullopt && IsQuantizeCompositeFunctionPass(pass, op);
}
void SaveReport(const QuantizationReport& report,
const absl::string_view file_path) {
if (const absl::Status save_status = report.Save(file_path);
save_status.ok()) {
LOG(INFO) << "Successfully saved quantization report to: " << file_path;
} else {
LOG(ERROR) << "Failed to save quantization report to: " << file_path
<< " with status: " << save_status;
}
}
}
SaveQuantizationReportInstrumentation::SaveQuantizationReportInstrumentation(
std::optional<absl::string_view> file_path)
: file_path_(OptionalStringViewToOptionalString(file_path)) {}
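// After the quantize-composite-functions pass runs on a module, builds and
// prints a QuantizationReport, and additionally saves it to `file_path_` when
// a path was provided at construction time.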
void SaveQuantizationReportInstrumentation::runAfterPass(Pass* pass,
Operation* op) {
if (!IsQuantizeCompositeFunctionPass(pass, op)) return;
auto module_op = cast<ModuleOp>(op);
const QuantizationReport report(module_op);
report.Print();
if (!ShouldSaveReport(pass, op, file_path_)) return;
SaveReport(report, *file_path_);
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.h"
#include <memory>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::QuantizationResults;
using ::stablehlo::quantization::io::ReadFileToString;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
using SaveQuantizationReportInstrumentationTest = QuantizationTestBase;
TEST_F(SaveQuantizationReportInstrumentationTest, SaveReport) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "tf.Const"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
const std::string report_file_path =
absl::StrCat(testing::TempDir(), "/save_report.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
const absl::StatusOr<std::string> file_data =
ReadFileToString(report_file_path);
ASSERT_THAT(file_data, IsOk());
QuantizationResults results{};
ASSERT_TRUE(TextFormat::ParseFromString(*file_data, &results));
ASSERT_THAT(results.results(), SizeIs(1));
EXPECT_THAT(results.results(0).quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(results.results(0).method().has_static_range_ptq());
}
TEST_F(SaveQuantizationReportInstrumentationTest,
ReportNotSavedWhenNoQuantizeCompositeFunctionsPass) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "stablehlo.constant"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
pm.addPass(createPrepareQuantizePass());
const std::string report_file_path = absl::StrCat(
testing::TempDir(),
"/report_not_saved_no_quantize_composite_functions_pass.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
EXPECT_THAT(ReadFileToString(report_file_path),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_F(SaveQuantizationReportInstrumentationTest,
ReportNotSavedWhenReportFilePathIsNullopt) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "stablehlo.constant"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
std::nullopt));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
}
}
} |
1,208 | cpp | tensorflow/tensorflow | stablehlo_op_quant_spec | tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.cc | tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_OPS_STABLEHLO_OP_QUANT_SPEC_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_OPS_STABLEHLO_OP_QUANT_SPEC_H_
#include <memory>
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
namespace mlir::quant::stablehlo {
std::unique_ptr<OpQuantSpec> GetStableHloOpQuantSpec(Operation* op);
std::unique_ptr<OpQuantScaleSpec> GetStableHloQuantConstraints(Operation* op);
bool IsOpQuantizableStableHlo(Operation* op);
}
#endif
#include "tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.h"
#include <memory>
#include "absl/status/statusor.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/protobuf.h"
#define DEBUG_TYPE "stablehlo_opt_quant_spec"
namespace mlir::quant::stablehlo {
namespace {
using ::mlir::stablehlo::DotGeneralOp;
using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::StaticRangePtq;
bool IsDenylistedLiftedFunction(Operation* op) {
if (auto xla_call_module_op = dyn_cast_or_null<TF::XlaCallModuleOp>(op);
xla_call_module_op != nullptr) {
absl::StatusOr<Method> method = GetQuantizationMethod(xla_call_module_op);
if (method.ok() && method->has_no_quantization()) {
return true;
}
}
return false;
}
void PopulateCoeffOpQuantDimIfPerChannelQuantized(
TF::XlaCallModuleOp xla_call_module_op, OpQuantSpec& spec) {
absl::StatusOr<Method> method = GetQuantizationMethod(xla_call_module_op);
if (method.ok() && method->has_static_range_ptq()) {
const StaticRangePtq& static_range_ptq_spec = method->static_range_ptq();
for (const auto& [operand_idx, quantized_type] :
static_range_ptq_spec.input_quantized_types()) {
if (quantized_type.has_dimension_specs()) {
spec.coeff_op_quant_dim[operand_idx] =
quantized_type.dimension_specs().dimension();
}
}
}
}
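// Illustrative example (not part of the original source): a `_quantization_method`
// attribute carrying the textproto below would make the function above set
// spec.coeff_op_quant_dim[1] = 3, i.e. operand 1 quantized along dimension 3.
// The key/dimension values come from the unit test and are otherwise arbitrary.
//
//   static_range_ptq {
//     input_quantized_types {
//       key: 1
//       value { dimension_specs { dimension: 3 } }
//     }
//   }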
}
std::unique_ptr<OpQuantSpec> GetStableHloOpQuantSpec(Operation* op) {
auto spec = std::make_unique<OpQuantSpec>();
if (auto call_op = dyn_cast_or_null<TF::XlaCallModuleOp>(op)) {
auto entry_function =
call_op->getAttrOfType<FlatSymbolRefAttr>("_entry_function");
StringRef function_name = entry_function.getValue();
if (!function_name.starts_with("composite_")) {
return spec;
}
if (function_name.contains("conv")) {
PopulateCoeffOpQuantDimIfPerChannelQuantized(call_op, *spec);
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("dot_general")) {
const auto module_op = call_op->getParentOfType<ModuleOp>();
const SymbolTable symbol_table(module_op);
auto entry_func_op =
dyn_cast_or_null<func::FuncOp>(symbol_table.lookup(function_name));
auto dot_general_op = *entry_func_op.getOps<DotGeneralOp>().begin();
if (auto optional_dim = GetDotGeneralQuantizationDim(dot_general_op);
optional_dim) {
spec->coeff_op_quant_dim[1] = optional_dim.value();
} else {
spec->coeff_op_quant_dim[1] = -1;
}
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
}
for (const auto [operand_idx, per_channel_dim] : spec->coeff_op_quant_dim) {
spec->quantizable_operands.insert(operand_idx);
}
}
return spec;
}
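// Usage sketch (illustrative only; `op` is assumed to be a lifted
// tf.XlaCallModule op like the ones in the tests below):
//
//   std::unique_ptr<OpQuantSpec> spec = GetStableHloOpQuantSpec(op);
//   for (const auto& [operand_idx, dim] : spec->coeff_op_quant_dim) {
//     // `operand_idx` names a quantizable operand; `dim` is its per-channel
//     // quantization dimension (-1 is what the implementation above stores
//     // for dot_general when no quantization dimension is found).
//   }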
std::unique_ptr<OpQuantScaleSpec> GetStableHloQuantConstraints(Operation* op) {
auto scale_spec = std::make_unique<OpQuantScaleSpec>();
if (llvm::isa<mlir::stablehlo::BroadcastInDimOp,
mlir::stablehlo::ConcatenateOp,
mlir::stablehlo::DynamicReshapeOp,
mlir::stablehlo::DynamicSliceOp, mlir::stablehlo::GatherOp,
mlir::stablehlo::PadOp, mlir::stablehlo::ReduceWindowOp,
mlir::stablehlo::ReshapeOp, mlir::stablehlo::SelectOp,
mlir::stablehlo::SliceOp, mlir::stablehlo::TransposeOp>(op)) {
scale_spec->has_same_scale_requirement = true;
}
if (llvm::isa<mlir::stablehlo::DynamicSliceOp, mlir::stablehlo::GatherOp,
mlir::stablehlo::PadOp, mlir::stablehlo::SliceOp>(op)) {
scale_spec->has_same_operand_and_result_type_requirement = true;
}
return scale_spec;
}
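// Decision order for the predicate below (a summary of the implementation, not
// an external contract): constants are always quantizable; terminators and
// quantfork cast ops never are; ops denylisted via a `no_quantization` method
// are rejected; same-scale ops are accepted; everything else requires the
// `_tfl_quant_trait = "fully_quantizable"` attribute.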
bool IsOpQuantizableStableHlo(Operation* op) {
if (isa<func::ConstantOp, mlir::stablehlo::ConstantOp>(op)) {
return true;
} else if (op->hasTrait<OpTrait::IsTerminator>() ||
isa<quantfork::QuantizeCastOp, quantfork::DequantizeCastOp>(op)) {
return false;
}
if (IsDenylistedLiftedFunction(op)) {
LLVM_DEBUG(llvm::errs() << "Denylisted quantizable unit: \n" << *op << "\n");
return false;
}
if (GetStableHloQuantConstraints(op)->has_same_scale_requirement) {
return true;
}
const bool attr_enforced_quantizable =
op->hasAttrOfType<StringAttr>(kQuantTraitAttrName) &&
op->getAttrOfType<StringAttr>(kQuantTraitAttrName).getValue().str() ==
QuantTraitValues[QuantizationTrait::FullyQuantizable];
return attr_enforced_quantizable;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir::quant::stablehlo {
namespace {
using ::mlir::stablehlo::GatherOp;
using ::testing::IsEmpty;
using ::testing::IsTrue;
using ::testing::NotNull;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using IsOpQuantizableStableHloTest = ::mlir::quant::QuantizationTestBase;
constexpr absl::string_view kModuleConstantAdd = R"mlir(
module {
func.func @constant_add() -> (tensor<3x2xf32>) {
%cst1 = stablehlo.constant dense<2.4> : tensor<3x2xf32>
%cst2 = stablehlo.constant dense<5.7> : tensor<3x2xf32>
%add = stablehlo.add %cst1, %cst2 : (tensor<3x2xf32>, tensor<3x2xf32>) -> tensor<3x2xf32>
func.return %add : tensor<3x2xf32>
}
}
)mlir";
constexpr absl::string_view kModuleCompositeSameScale = R"mlir(
module {
func.func @same_scale_after_composite() -> tensor<3x1xf32> {
%0 = "tf.XlaCallModule"() {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : () -> tensor<1x3xf32>
%1 = "quantfork.qcast"(%0) {volatile} : (tensor<1x3xf32>) -> tensor<1x3x!quant.uniform<i8:f32, 0.13170163023705575:-1>>
%2 = "quantfork.dcast"(%1) : (tensor<1x3x!quant.uniform<i8:f32, 0.13170163023705575:-1>>) -> tensor<1x3xf32>
%3 = stablehlo.reshape %2 : (tensor<1x3xf32>) -> tensor<3x1xf32>
%4 = "quantfork.qcast"(%3) {volatile} : (tensor<3x1xf32>) -> tensor<3x1x!quant.uniform<i8:f32, 0.13170163023705575:-1>>
%5 = "quantfork.dcast"(%4) : (tensor<3x1x!quant.uniform<i8:f32, 0.13170163023705575:-1>>) -> tensor<3x1xf32>
return %5 : tensor<3x1xf32>
}
}
)mlir";
constexpr absl::string_view kModuleCompositeNoAttr = R"mlir(
module {
func.func @composite_without_attr() -> tensor<1x3xf32> {
%0 = "tf.XlaCallModule"() {Sout = [#tf_type.shape<1x3>], _entry_function = @non_quantizable_composite, _original_entry_function = "non_quantizable_composite", _stablehlo_module_attrs = {}, device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : () -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
TEST_F(IsOpQuantizableStableHloTest, ConstantOpQuantizable) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleConstantAdd);
ASSERT_TRUE(module_op);
auto test_func = module_op->lookupSymbol<func::FuncOp>("constant_add");
ASSERT_THAT(test_func, NotNull());
auto constant_op =
FindOperationOfType<mlir::stablehlo::ConstantOp>(test_func);
EXPECT_TRUE(IsOpQuantizableStableHlo(constant_op));
}
TEST_F(IsOpQuantizableStableHloTest, TerminatorOpNotQuantizable) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleConstantAdd);
ASSERT_TRUE(module_op);
auto test_func = module_op->lookupSymbol<func::FuncOp>("constant_add");
ASSERT_THAT(test_func, NotNull());
auto return_op = FindOperationOfType<func::ReturnOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(return_op));
}
TEST_F(IsOpQuantizableStableHloTest, SameScaleOpQuantizable) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleCompositeSameScale);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("same_scale_after_composite");
ASSERT_THAT(test_func, NotNull());
auto reshape_op = FindOperationOfType<mlir::stablehlo::ReshapeOp>(test_func);
EXPECT_TRUE(IsOpQuantizableStableHlo(reshape_op));
}
TEST_F(IsOpQuantizableStableHloTest, NonSameScaleOpNotQuantizable) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleConstantAdd);
ASSERT_TRUE(module_op);
auto test_func = module_op->lookupSymbol<func::FuncOp>("constant_add");
ASSERT_THAT(test_func, NotNull());
auto add_op = FindOperationOfType<mlir::stablehlo::AddOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(add_op));
}
TEST_F(IsOpQuantizableStableHloTest, ValidXlaCallModuleOpQuantizable) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleCompositeSameScale);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("same_scale_after_composite");
ASSERT_THAT(test_func, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(test_func);
EXPECT_TRUE(IsOpQuantizableStableHlo(xla_call_module_op));
}
TEST_F(IsOpQuantizableStableHloTest, InvalidXlaCallModuleOpNotQuantizable) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleCompositeNoAttr);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("composite_without_attr");
ASSERT_THAT(test_func, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(xla_call_module_op));
}
TEST_F(IsOpQuantizableStableHloTest, QuantizeDequantizeOpNotQuantizable) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleCompositeSameScale);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("same_scale_after_composite");
ASSERT_THAT(test_func, NotNull());
auto quantize_op = FindOperationOfType<quantfork::QuantizeCastOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(quantize_op));
auto dequantize_op =
FindOperationOfType<quantfork::DequantizeCastOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(dequantize_op));
}
TEST_F(IsOpQuantizableStableHloTest,
XlaCallModuleOpQuantizableWhenNotDenylisted) {
constexpr absl::string_view
kModuleXlaCallModuleOpWithDefaultQuantizationMethod = R"mlir(
func.func @xla_call_module_default_quantization_method(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _quantization_method = "", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}, _tfl_quant_trait = "fully_quantizable"} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleXlaCallModuleOpWithDefaultQuantizationMethod);
ASSERT_TRUE(module_op);
auto test_func = module_op->lookupSymbol<func::FuncOp>(
"xla_call_module_default_quantization_method");
ASSERT_THAT(test_func, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(test_func);
EXPECT_TRUE(IsOpQuantizableStableHlo(xla_call_module_op));
}
TEST_F(IsOpQuantizableStableHloTest, DenylistedXlaCallModuleOpNotQuantizable) {
constexpr absl::string_view kModuleDenylistedXlaCallModuleOp = R"mlir(
func.func @xla_call_module_denylisted(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _quantization_method = "no_quantization {}", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}, _tfl_quant_trait = "fully_quantizable"} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDenylistedXlaCallModuleOp);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("xla_call_module_denylisted");
ASSERT_THAT(test_func, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(xla_call_module_op));
}
using GetStableHloOpQuantSpecTest = ::mlir::quant::QuantizationTestBase;
TEST_F(GetStableHloOpQuantSpecTest,
EmptyCoeffOpQuantDimForPerTensorQuantizedConvolution) {
constexpr absl::string_view
kXlaCallModuleOpWithPerTensorQuantizedConvolution = R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}>
{
_entry_function = @composite_conv_fn_1,
_original_entry_function = "composite_conv_fn_1",
_quantization_method = "static_range_ptq {}",
_stablehlo_module_attrs = {jax.uses_shape_polymorphism = true},
_tfl_quant_trait = "fully_quantizable"
} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithPerTensorQuantizedConvolution);
ASSERT_TRUE(module_op);
const FailureOr<TF::XlaCallModuleOp> xla_call_module_op =
FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op);
ASSERT_TRUE(succeeded(xla_call_module_op));
const std::unique_ptr<OpQuantSpec> op_quant_spec =
GetStableHloOpQuantSpec(*xla_call_module_op);
ASSERT_THAT(op_quant_spec, NotNull());
EXPECT_THAT(op_quant_spec->coeff_op_quant_dim, IsEmpty());
}
TEST_F(GetStableHloOpQuantSpecTest,
EmptyCoeffOpQuantDimForPerChannelQuantizedConvolution) {
constexpr absl::string_view
kXlaCallModuleOpWithPerChannelQuantizedConvolution = R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}>
{
_entry_function = @composite_conv_fn_1,
_original_entry_function = "composite_conv_fn_1",
_quantization_method = "static_range_ptq {input_quantized_types {key: 1, value {dimension_specs {dimension: 3}}}}",
_stablehlo_module_attrs = {jax.uses_shape_polymorphism = true},
_tfl_quant_trait = "fully_quantizable"
} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithPerChannelQuantizedConvolution);
ASSERT_TRUE(module_op);
const FailureOr<TF::XlaCallModuleOp> xla_call_module_op =
FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op);
ASSERT_TRUE(succeeded(xla_call_module_op));
const std::unique_ptr<OpQuantSpec> op_quant_spec =
GetStableHloOpQuantSpec(*xla_call_module_op);
ASSERT_THAT(op_quant_spec, NotNull());
EXPECT_THAT(op_quant_spec->coeff_op_quant_dim,
UnorderedElementsAre(Pair(1, 3)));
}
using GetStableHloQuantConstraintsTest = ::mlir::quant::QuantizationTestBase;
TEST_F(GetStableHloQuantConstraintsTest,
HasSameOperandAndResultTypeRequirementSucceeds) {
constexpr absl::string_view kModuleGather = R"mlir(
module {
func.func @main() -> (tensor<2x3x2x2xf32>) {
%0 = stablehlo.constant dense<1.0> : tensor<3x4x2xf32>
%1 = stablehlo.constant dense<2> : tensor<2x3x2xi64>
%2 = "stablehlo.gather"(%0, %1) {
dimension_numbers = #stablehlo.gather<
offset_dims = [2, 3],
collapsed_slice_dims = [0],
start_index_map = [1, 0],
index_vector_dim = 2>,
slice_sizes = array<i64: 1, 2, 2>,
indices_are_sorted = false
} : (tensor<3x4x2xf32>, tensor<2x3x2xi64>) -> tensor<2x3x2x2xf32>
func.return %2 : tensor<2x3x2x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleGather);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
Operation* gather_op = FindOperationOfType<GatherOp>(main_fn);
const auto spec = GetStableHloQuantConstraints(gather_op);
EXPECT_THAT(spec, NotNull());
EXPECT_THAT(spec->has_same_operand_and_result_type_requirement, IsTrue());
}
}
} |
1,209 | cpp | tensorflow/tensorflow | report | tensorflow/compiler/mlir/quantization/stablehlo/cc/report.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_REPORT_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_REPORT_H_
#include <string>
#include "absl/status/status.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
namespace mlir::quant::stablehlo {
class QuantizationReport {
public:
QuantizationReport() = default;
explicit QuantizationReport(ModuleOp module_op);
void AddQuantizationResult(
::stablehlo::quantization::QuantizationResult&& result);
const ::stablehlo::quantization::QuantizationResults& GetQuantizationResults()
const {
return quantization_results_;
}
std::string ToString() const;
void Print() const;
absl::Status Save(StringRef file_path) const;
private:
::stablehlo::quantization::QuantizationResults CollectResultsFromModuleOp(
ModuleOp module_op) const;
::stablehlo::quantization::QuantizationResults quantization_results_;
};
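// Usage sketch (illustrative; the file path is hypothetical):
//
//   QuantizationReport report(module_op);  // collects results from the module
//   report.Print();                        // human-readable dump to stdout
//   absl::Status s = report.Save("/tmp/quantization_report.txtpb");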
}
#endif
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h"
#include <optional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/protobuf.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::QuantizationResult;
using ::stablehlo::quantization::QuantizationResults;
using ::stablehlo::quantization::io::WriteStringToFile;
using ::tsl::protobuf::TextFormat;
std::string GetCompositeFunctionName(const StringRef quantized_func_name) {
return Twine(kCompositeFuncPrefix)
.concat(quantized_func_name.rsplit(kQuantizedFuncPrefix).second)
.str();
}
std::optional<QuantizationResult> GetQuantizationResult(func::CallOp call_op) {
const StringRef callee_name = call_op.getCalleeAttr().getValue();
if (!callee_name.starts_with(kQuantizedFuncPrefix)) {
return std::nullopt;
}
absl::StatusOr<Method> method = GetQuantizationMethod(call_op);
if (!method.ok()) {
call_op->emitError() << "Failed to get quantization method: "
<< method.status().ToString();
return std::nullopt;
}
QuantizationResult result{};
result.mutable_quantizable_unit()->set_name(
GetCompositeFunctionName(callee_name));
*result.mutable_method() = std::move(*method);
return result;
}
std::optional<QuantizationResult> GetQuantizationResult(
TF::XlaCallModuleOp xla_call_module_op) {
const StringAttr callee_name_attr =
mlir::dyn_cast_or_null<StringAttr>(xla_call_module_op->getDiscardableAttr(
kOriginalStablehloEntryFunctionAttrName));
if (callee_name_attr == nullptr) return std::nullopt;
if (callee_name_attr.getValue().starts_with(kCompositeFuncPrefix)) {
QuantizationResult result{};
result.mutable_quantizable_unit()->set_name(
callee_name_attr.getValue().str());
result.mutable_method()->mutable_no_quantization();
return result;
} else {
return std::nullopt;
}
}
void PopulateQuantizedResults(ModuleOp module_op,
QuantizationResults& results) {
module_op.walk([&results](func::CallOp call_op) {
std::optional<QuantizationResult> result = GetQuantizationResult(call_op);
if (result == std::nullopt) return WalkResult::skip();
*results.add_results() = std::move(*result);
return WalkResult::advance();
});
}
void PopulateNonQuantizedResults(ModuleOp module_op,
QuantizationResults& results) {
module_op.walk([&results](TF::XlaCallModuleOp xla_call_module_op) {
std::optional<QuantizationResult> result =
GetQuantizationResult(xla_call_module_op);
if (result == std::nullopt) return WalkResult::skip();
*results.add_results() = std::move(*result);
return WalkResult::advance();
});
}
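// Illustrative shape of a collected result (reconstructed from the unit tests,
// not a normative example): a quantized call to @quantized_dot_general_fn with
// `_quantization_method = "static_range_ptq { }"` is reported roughly as
//
//   results {
//     quantizable_unit { name: "composite_dot_general_fn" }
//     method { static_range_ptq {} }
//   }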
}
QuantizationReport::QuantizationReport(ModuleOp module_op)
: quantization_results_(CollectResultsFromModuleOp(module_op)) {}
QuantizationResults QuantizationReport::CollectResultsFromModuleOp(
ModuleOp module_op) const {
QuantizationResults results{};
PopulateQuantizedResults(module_op, results);
PopulateNonQuantizedResults(module_op, results);
return results;
}
void QuantizationReport::AddQuantizationResult(QuantizationResult&& result) {
*quantization_results_.add_results() = std::move(result);
}
std::string QuantizationReport::ToString() const {
std::string results_str{};
TextFormat::PrintToString(quantization_results_, &results_str);
return absl::StrCat("===== Quantization Report =====\n\n", results_str,
"\n===== Quantization Report End =====\n\n");
}
void QuantizationReport::Print() const {
llvm::outs() << ToString();
llvm::outs().flush();
}
absl::Status QuantizationReport::Save(const StringRef file_path) const {
std::string results_str{};
TextFormat::PrintToString(GetQuantizationResults(), &results_str);
return WriteStringToFile(file_path, results_str);
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h"
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::QuantizableUnit;
using ::stablehlo::quantization::QuantizationResult;
using ::stablehlo::quantization::QuantizationResults;
using ::stablehlo::quantization::io::ReadFileToString;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::testing::TempDir;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using QuantizationReportTest = ::mlir::quant::QuantizationTestBase;
TEST_F(QuantizationReportTest, GetQuantizationResultsReturnsEmptyResults) {
QuantizationReport report{};
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), IsEmpty());
}
TEST_F(QuantizationReportTest, AddQuantizationResult) {
QuantizationResult result{};
QuantizableUnit& quantizable_unit = *result.mutable_quantizable_unit();
quantizable_unit.set_name("quantized_my_function");
Method& method = *result.mutable_method();
method.mutable_no_quantization();
QuantizationReport report{};
report.AddQuantizationResult(std::move(result));
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), SizeIs(1));
const QuantizationResult& first_result = results.results(0);
EXPECT_THAT(first_result.quantizable_unit().name(),
StrEq("quantized_my_function"));
EXPECT_TRUE(first_result.method().has_no_quantization());
}
TEST_F(QuantizationReportTest, InitializeWithModuleOp) {
constexpr absl::string_view kQuantizedDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%1 = stablehlo.uniform_quantize %arg0 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%2 = call @quantized_dot_general_fn(%1, %0) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
return %3 : tensor<1x3xf32>
}
func.func private @quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneral);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), SizeIs(1));
const QuantizationResult& result = results.results(0);
EXPECT_THAT(result.quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(result.method().has_static_range_ptq());
}
TEST_F(QuantizationReportTest,
InitializeWithModuleOpWithoutQuantizationMethodAttribute) {
constexpr absl::string_view
kQuantizedDotGeneralMissingQuantizationMethodAttr = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%1 = stablehlo.uniform_quantize %arg0 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%2 = call @quantized_dot_general_fn(%1, %0) : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
return %3 : tensor<1x3xf32>
}
func.func private @quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneralMissingQuantizationMethodAttr);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), IsEmpty());
}
TEST_F(QuantizationReportTest, InitializeWithModuleOpWithInvalidCalleeName) {
constexpr absl::string_view kQuantizedDotGeneralWithInvalidCalleeName =
R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%1 = stablehlo.uniform_quantize %arg0 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%2 = call @invalid_quantized_dot_general_fn(%1, %0) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
return %3 : tensor<1x3xf32>
}
func.func private @invalid_quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneralWithInvalidCalleeName);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), IsEmpty());
}
TEST_F(QuantizationReportTest, InitializeWithModuleOpWithNonQuantizedOp) {
constexpr absl::string_view kNonQuantizedDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant dense<3.000000e+0> : tensor<2x3xf32>
%1 = "tf.XlaCallModule"(%arg0, %0) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %1 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kNonQuantizedDotGeneral);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), SizeIs(1));
const QuantizationResult& result = results.results(0);
EXPECT_THAT(result.quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(result.method().has_no_quantization());
}
TEST_F(QuantizationReportTest,
InitializeWithModuleOpWithQuantizedAndNonQuantizedOps) {
constexpr absl::string_view kQuantizedDotGeneralAndNonQuantizedDotGeneral =
R"mlir(
func.func @main(%arg0: tensor<1x2xf32>, %arg1: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant dense<3.000000e+0> : tensor<2x3xf32>
%1 = "tf.XlaCallModule"(%arg0, %0) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%3 = stablehlo.uniform_quantize %arg1 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%4 = call @quantized_dot_general_fn_2(%3, %2) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%5 = stablehlo.uniform_dequantize %4 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
%6 = stablehlo.add %1, %5 : tensor<1x3xf32>
return %6 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
func.func private @quantized_dot_general_fn_2(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneralAndNonQuantizedDotGeneral);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), SizeIs(2));
const QuantizationResult& quantized_result = results.results(0);
EXPECT_THAT(quantized_result.quantizable_unit().name(),
StrEq("composite_dot_general_fn_2"));
EXPECT_TRUE(quantized_result.method().has_static_range_ptq());
const QuantizationResult& non_quantized_result = results.results(1);
EXPECT_THAT(non_quantized_result.quantizable_unit().name(),
StrEq("composite_dot_general_fn_1"));
EXPECT_TRUE(non_quantized_result.method().has_no_quantization());
}
TEST_F(QuantizationReportTest, ToString) {
QuantizationResult result{};
QuantizableUnit& quantizable_unit = *result.mutable_quantizable_unit();
quantizable_unit.set_name("quantized_my_function");
Method& method = *result.mutable_method();
method.mutable_no_quantization();
QuantizationReport report{};
report.AddQuantizationResult(std::move(result));
std::string result_str{};
TextFormat::PrintToString(report.GetQuantizationResults(), &result_str);
EXPECT_THAT(report.ToString(), HasSubstr("Quantization Report"));
EXPECT_THAT(report.ToString(), HasSubstr(result_str));
EXPECT_THAT(report.ToString(), HasSubstr("Quantization Report End"));
}
TEST_F(QuantizationReportTest, Save) {
constexpr absl::string_view kQuantizedDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%1 = stablehlo.uniform_quantize %arg0 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%2 = call @quantized_dot_general_fn(%1, %0) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
return %3 : tensor<1x3xf32>
}
func.func private @quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneral);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const std::string dst_file_path =
absl::StrCat(TempDir(), "/quantization_report.txtpb");
const absl::Status save_status = report.Save(dst_file_path);
ASSERT_THAT(save_status, IsOk());
const absl::StatusOr<std::string> file_data = ReadFileToString(dst_file_path);
ASSERT_THAT(file_data, IsOk());
QuantizationResults results{};
ASSERT_TRUE(TextFormat::ParseFromString(*file_data, &results));
ASSERT_THAT(results.results(), SizeIs(1));
EXPECT_THAT(results.results(0).quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(results.results(0).method().has_static_range_ptq());
}
}
} |
1,210 | cpp | tensorflow/tensorflow | io | tensorflow/compiler/mlir/quantization/stablehlo/cc/io.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/io_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_IO_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_IO_H_
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
namespace stablehlo::quantization::io {
absl::StatusOr<std::string> GetLocalTmpFileName(tsl::Env* env);
absl::StatusOr<std::string> GetLocalTmpFileName();
absl::StatusOr<std::string> CreateTmpDir(tsl::Env* env);
absl::StatusOr<std::string> CreateTmpDir();
absl::Status WriteStringToFile(absl::string_view file_path,
absl::string_view data);
absl::StatusOr<std::string> ReadFileToString(absl::string_view file_path);
absl::StatusOr<std::vector<std::string>> ListDirectory(
absl::string_view directory);
template <class MessageT>
absl::StatusOr<MessageT> ReadBinaryProto(const std::string& binary_file_path) {
MessageT message;
TF_RETURN_IF_ERROR(
tsl::ReadBinaryProto(tsl::Env::Default(), binary_file_path, &message));
return message;
}
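// Usage sketch (illustrative; file names are hypothetical): the helpers are
// meant to compose for simple round-trips.
//
//   absl::StatusOr<std::string> dir = CreateTmpDir();
//   if (dir.ok()) {
//     const std::string path = *dir + "/data.txt";
//     absl::Status s = WriteStringToFile(path, "hello");
//     absl::StatusOr<std::string> back = ReadFileToString(path);  // "hello"
//   }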
}
#endif
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace stablehlo::quantization::io {
absl::StatusOr<std::string> GetLocalTmpFileName(tsl::Env* const env) {
std::string tmp_fname{};
if (!env->LocalTempFilename(&tmp_fname)) {
return absl::InternalError("Failed to create tmp file name.");
}
return tmp_fname;
}
absl::StatusOr<std::string> GetLocalTmpFileName() {
return GetLocalTmpFileName(tsl::Env::Default());
}
absl::StatusOr<std::string> CreateTmpDir(tsl::Env* const env) {
TF_ASSIGN_OR_RETURN(std::string tmp_dir, GetLocalTmpFileName(env));
if (!env->RecursivelyCreateDir(tmp_dir).ok()) {
return absl::InternalError(
absl::StrFormat("Failed to create tmp dir: '%s'", tmp_dir));
}
return tmp_dir;
}
absl::StatusOr<std::string> CreateTmpDir() {
return CreateTmpDir(tsl::Env::Default());
}
absl::Status WriteStringToFile(const absl::string_view file_path,
const absl::string_view data) {
auto* env = tsl::Env::Default();
return WriteStringToFile(env, std::string(file_path), data);
}
absl::StatusOr<std::string> ReadFileToString(
const absl::string_view file_path) {
auto* env = tsl::Env::Default();
std::string data{};
absl::Status read_status =
ReadFileToString(env, std::string(file_path), &data);
if (read_status.ok()) {
return data;
} else {
return read_status;
}
}
absl::StatusOr<std::vector<std::string>> ListDirectory(
absl::string_view directory) {
std::vector<std::string> children;
TF_RETURN_IF_ERROR(
tsl::Env::Default()->GetChildren(std::string(directory), &children));
return children;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include <cstdint>
#include <fstream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tsl/platform/env.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/types.h"
namespace stablehlo::quantization::io {
namespace {
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
class TestEnvBrokenFileSystem : public tsl::Env {
public:
TestEnvBrokenFileSystem() = default;
bool MatchPath(const tsl::string& path, const tsl::string& pattern) override {
return false;
}
void SleepForMicroseconds(int64_t micros) override {}
tsl::string GetRunfilesDir() override { return tsl::string("dummy_path"); }
int32_t GetCurrentThreadId() override { return 0; }
tsl::Thread* StartThread(const tsl::ThreadOptions& thread_options,
const tsl::string& name,
absl::AnyInvocable<void()> fn) override {
return nullptr;
}
bool GetCurrentThreadName(tsl::string* name) override { return false; }
void SchedClosure(absl::AnyInvocable<void()> closure) override {}
void SchedClosureAfter(int64_t micros,
absl::AnyInvocable<void()> closure) override {}
absl::Status LoadDynamicLibrary(const char* library_filename,
void** handle) override {
return absl::OkStatus();
}
absl::Status GetSymbolFromLibrary(void* handle, const char* symbol_name,
void** symbol) override {
return absl::OkStatus();
}
tsl::string FormatLibraryFileName(const tsl::string& name,
const tsl::string& version) override {
return tsl::string("dummy_path");
}
absl::Status GetFileSystemForFile(const std::string& fname,
tsl::FileSystem** result) override {
return absl::InternalError("Broken file system");
}
private:
void GetLocalTempDirectories(std::vector<tsl::string>* list) override {
list->push_back("/tmp");
}
};
class TestEnvBrokenFileSystemAndNoLocalTempDirs
: public TestEnvBrokenFileSystem {
private:
void GetLocalTempDirectories(std::vector<tsl::string>* list) override {}
};
TEST(IoTest, GetLocalTmpFileNameGivesValidFileName) {
absl::StatusOr<std::string> tmp_file_name = GetLocalTmpFileName();
ASSERT_THAT(tmp_file_name, IsOk());
EXPECT_THAT(*tmp_file_name, Not(IsEmpty()));
}
TEST(IoTest, GetLocalTmpFileNameWhenNoTempDirsReturnsInternalError) {
TestEnvBrokenFileSystemAndNoLocalTempDirs broken_env;
absl::StatusOr<std::string> tmp_file_name = GetLocalTmpFileName(&broken_env);
EXPECT_THAT(tmp_file_name,
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Failed to create tmp file name")));
}
TEST(IoTest, CreateTmpDirReturnsValidTmpPath) {
absl::StatusOr<std::string> tmp_dir = CreateTmpDir();
ASSERT_THAT(tmp_dir, IsOk());
auto* const env = tsl::Env::Default();
EXPECT_THAT(env->FileExists(*tmp_dir), IsOk());
}
TEST(IoTest, CreateTmpDirWhenInvalidPathReturnsInternalError) {
TestEnvBrokenFileSystem test_env{};
absl::StatusOr<std::string> tmp_dir = CreateTmpDir(&test_env);
EXPECT_THAT(tmp_dir, StatusIs(absl::StatusCode::kInternal,
HasSubstr("Failed to create tmp dir")));
}
TEST(IoTest, WriteStringToFile) {
const std::string dst_file_path =
absl::StrCat(testing::TempDir(), "/tmp_file");
const absl::Status write_status =
WriteStringToFile(dst_file_path, "test_string");
ASSERT_THAT(write_status, IsOk());
auto* const env = tsl::Env::Default();
ASSERT_THAT(env->FileExists(dst_file_path), IsOk());
std::string data{};
ASSERT_THAT(tsl::ReadFileToString(env, dst_file_path, &data), IsOk());
EXPECT_THAT(data, Eq("test_string"));
}
TEST(IoTest, ReadFileToString) {
const std::string src_file_path =
absl::StrCat(testing::TempDir(), "/tmp_file");
{
std::ofstream ofs(src_file_path);
ofs << "test_string";
}
const absl::StatusOr<std::string> read_status =
ReadFileToString(src_file_path);
ASSERT_THAT(read_status, IsOk());
EXPECT_THAT(*read_status, Eq("test_string"));
}
TEST(IoTest, ListChildrenInDirectory) {
absl::StatusOr<std::string> tmp_dir = CreateTmpDir();
ASSERT_THAT(tmp_dir, IsOk());
auto* const env = tsl::Env::Default();
EXPECT_THAT(env->FileExists(*tmp_dir), IsOk());
ASSERT_THAT(
WriteStringToFile(absl::StrCat(*tmp_dir, "/tmp_file1"), "test_string"),
IsOk());
ASSERT_THAT(
WriteStringToFile(absl::StrCat(*tmp_dir, "/tmp_file2"), "test_string"),
IsOk());
ASSERT_THAT(env->RecursivelyCreateDir(absl::StrCat(*tmp_dir, "/subdir")),
IsOk());
absl::StatusOr<std::vector<std::string>> children = ListDirectory(*tmp_dir);
EXPECT_THAT(children, IsOk());
EXPECT_THAT(children.value(), SizeIs(3));
EXPECT_THAT(children.value(),
UnorderedElementsAre("subdir", "tmp_file1", "tmp_file2"));
}
}
} |
1,211 | cpp | tensorflow/tensorflow | saved_model_export | tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_SAVED_MODEL_EXPORT_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_SAVED_MODEL_EXPORT_H_
#include <optional>
#include <string>
#include <unordered_set>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/exported_model.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
namespace mlir::quant::stablehlo {
constexpr absl::string_view kExportStepSuffix = "_export";
struct ExportOptions {
bool duplicate_shape_determining_constants = true;
bool unfreeze_constants = false;
std::string checkpoint_dir = "";
std::string debug_name = "stablehlo_quant";
};
absl::StatusOr<tensorflow::quantization::ExportedModel> CreateExportedModel(
const std::vector<std::string>& signature_keys,
const std::unordered_set<std::string>& tags,
const ::stablehlo::quantization::QuantizationConfig& quantization_config,
absl::string_view debug_name_prefix,
const absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND, ModuleOp module_op);
[[nodiscard]] tensorflow::quantization::ExportedModel
CreateExportedModelFromGraphDef(
tensorflow::GraphDef&& graph_def, absl::string_view init_node_name,
absl::string_view checkpoint_dir,
std::optional<tensorflow::SaverDef> saver_def,
const absl::flat_hash_map<std::string, std::string>& function_aliases,
const std::vector<tensorflow::AssetFileDef>& asset_file_defs);
absl::StatusOr<std::optional<tensorflow::SaverDef>> CreateSaverDef(
const std::vector<std::string>& control_ret_node_names,
const tensorflow::GraphDef& graph_def);
void AddExportPasses(mlir::PassManager& pm,
bool duplicate_shape_determining_constants);
absl::StatusOr<tensorflow::quantization::ExportedModel>
ConvertMlirModuleToExportedModel(
mlir::ModuleOp module_op, absl::string_view checkpoint_dir,
const absl::flat_hash_map<std::string, std::string>& function_aliases,
const std::vector<tensorflow::AssetFileDef>& asset_file_defs);
absl::StatusOr<SmallVector<::tensorflow::AssetFileDef>> RunExportPasses(
const ExportOptions& export_opts, MLIRContext& ctx, ModuleOp module_op);
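// Usage sketch (illustrative; the checkpoint directory is hypothetical and the
// field values mirror the defaults used by CreateExportedModel in the .cc):
//
//   const ExportOptions opts = {/*duplicate_shape_determining_constants=*/true,
//                               /*unfreeze_constants=*/false,
//                               /*checkpoint_dir=*/"/tmp/ckpt",
//                               /*debug_name=*/"my_model_export"};
//   absl::StatusOr<SmallVector<tensorflow::AssetFileDef>> assets =
//       RunExportPasses(opts, ctx, module_op);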
}
#endif
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.h"
#include <memory>
#include <optional>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/run_passes.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/exported_model.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/passes/constants.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/python/unfreeze_constants.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/export_graphdef.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace mlir::quant::stablehlo {
namespace {
using ::mlir::tf_saved_model::kTfSavedModelIndexPathAttr;
using ::mlir::tf_saved_model::kTfSavedModelInitializerInitType;
using ::mlir::tf_saved_model::kTfSavedModelInitializerRestoreType;
using ::stablehlo::quantization::QuantizationConfig;
using ::stablehlo::quantization::io::GetLocalTmpFileName;
using ::tensorflow::AssetFileDef;
using ::tensorflow::ConvertMlirToGraph;
using ::tensorflow::FunctionDefLibrary;
using ::tensorflow::FunctionLibraryDefinition;
using ::tensorflow::Graph;
using ::tensorflow::GraphDef;
using ::tensorflow::Node;
using ::tensorflow::NodeDef;
using ::tensorflow::OpRegistry;
using ::tensorflow::SaverDef;
using ::tensorflow::quantization::ExportedModel;
using ::tensorflow::quantization::RunPasses;
using ::tensorflow::quantization::UnfreezeConstantsAndSaveVariables;
std::string GetNodeName(const std::vector<std::string>& control_ret_node_names,
const absl::string_view contains) {
for (const std::string& node_name : control_ret_node_names) {
if (absl::StrContains(node_name, contains)) {
VLOG(1) << "Node found: " << node_name << ", contains: " << contains;
return node_name;
}
}
VLOG(1) << "Could not find node whose name conatins: " << contains;
return "";
}
std::string FindFilePrefixTensorName(const GraphDef& graph_def) {
for (const NodeDef& node_def : graph_def.node()) {
if (node_def.op() == FunctionLibraryDefinition::kArgOp) {
const auto index_path_attr_itr =
node_def.attr().find(kTfSavedModelIndexPathAttr.str());
if (index_path_attr_itr != node_def.attr().end()) {
const auto& index_paths = index_path_attr_itr->second.list().s();
if (absl::c_find(index_paths, kTfFilePrefix.str()) !=
index_paths.end()) {
return absl::StrCat(node_def.name(), ":0");
}
}
}
}
return "";
}
}
absl::StatusOr<ExportedModel> CreateExportedModel(
const std::vector<std::string>& signature_keys,
const std::unordered_set<std::string>& tags,
const QuantizationConfig& quantization_config,
absl::string_view debug_name_prefix,
const absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND, ModuleOp module_op) {
TF_ASSIGN_OR_RETURN(const std::string checkpoint_dir, GetLocalTmpFileName());
const ExportOptions export_opts = {
true,
false, checkpoint_dir,
absl::StrCat(debug_name_prefix, kExportStepSuffix)};
TF_ASSIGN_OR_RETURN(const SmallVector<AssetFileDef> asset_file_defs,
RunExportPasses(export_opts, ctx, module_op));
return ConvertMlirModuleToExportedModel(
module_op, checkpoint_dir, function_aliases,
{asset_file_defs.begin(), asset_file_defs.end()});
}
ExportedModel CreateExportedModelFromGraphDef(
GraphDef&& graph_def, const absl::string_view init_node_name,
const absl::string_view checkpoint_dir,
const std::optional<SaverDef> saver_def,
const absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
const std::vector<AssetFileDef>& asset_file_defs) {
ExportedModel exported_model{};
*exported_model.mutable_graph_def() = graph_def;
exported_model.set_init_node_name(std::string(init_node_name));
exported_model.set_checkpoint_dir(std::string(checkpoint_dir));
exported_model.mutable_function_aliases()->insert(function_aliases.begin(),
function_aliases.end());
for (const AssetFileDef& asset_file_def : asset_file_defs) {
*exported_model.mutable_asset_file_defs()->Add() = asset_file_def;
}
if (saver_def != std::nullopt) {
*exported_model.mutable_saver_def() = *std::move(saver_def);
}
return exported_model;
}
void AddExportPasses(mlir::PassManager& pm,
const bool duplicate_shape_determining_constants) {
AddCallModuleSerializationPasses(pm);
if (duplicate_shape_determining_constants) {
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::CreateDuplicateShapeDeterminingConstantsPass());
}
pm.addPass(mlir::quant::CreateInsertMainFunctionPass());
pm.addPass(mlir::quant::CreateLiftHashTableOpsAsArgsPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::CreateFunctionalToExecutorDialectConversionPass());
pm.addPass(mlir::CreateBreakUpIslandsPass());
pm.addPass(mlir::quant::CreateMergeInitializerFunctionOpsToMainPass());
pm.addPass(mlir::quant::CreateMergeSaveFunctionOpsToMainPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::CreateMergeDuplicateResourceOpsPass());
pm.addPass(mlir::TF::CreateStripNoinlineAttributePass());
}
absl::StatusOr<std::optional<SaverDef>> CreateSaverDef(
const std::vector<std::string>& control_ret_node_names,
const GraphDef& graph_def) {
const std::string filename_tensor_name = FindFilePrefixTensorName(graph_def);
const std::string restore_op_name =
GetNodeName(control_ret_node_names, kTfSavedModelInitializerRestoreType);
const std::string save_node_name =
GetNodeName(control_ret_node_names, kTfQuantSaveOpName);
const std::vector<absl::string_view> fields = {
filename_tensor_name, restore_op_name, save_node_name};
const auto is_empty_predicate = [](const absl::string_view s) {
return s.empty();
};
if (absl::c_all_of(fields, is_empty_predicate)) {
return std::nullopt;
} else if (absl::c_none_of(fields, is_empty_predicate)) {
SaverDef saver_def{};
saver_def.set_version(SaverDef::V2);
saver_def.set_filename_tensor_name(filename_tensor_name);
saver_def.set_restore_op_name(restore_op_name);
saver_def.set_save_tensor_name(absl::StrCat(save_node_name, ":0"));
return saver_def;
} else {
return absl::InternalError(
absl::StrCat("Failed to create SaverDef. Fields should be either all "
"empty strings or all non-empty strings. Got fields: ",
absl::StrJoin(fields, ",")));
}
}
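// Converts `module_op` to a Graph, then packages the resulting GraphDef
// together with the init node name, SaverDef, function aliases, and asset
// file defs into an ExportedModel.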
absl::StatusOr<ExportedModel> ConvertMlirModuleToExportedModel(
const mlir::ModuleOp module_op, const absl::string_view checkpoint_dir,
const absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
const std::vector<AssetFileDef>& asset_file_defs) {
const tensorflow::GraphExportConfig config{};
FunctionLibraryDefinition flib_def{OpRegistry::Global(),
FunctionDefLibrary()};
std::unique_ptr<Graph> graph;
absl::flat_hash_set<Node*> control_ret_nodes{};
TF_RETURN_IF_ERROR(tensorflow::tf2xla::v2::ConvertMlirToGraph(
module_op, config, &graph, &flib_def, &control_ret_nodes));
GraphDef graph_def{};
graph->ToGraphDef(&graph_def);
std::vector<std::string> control_ret_node_names{};
for (Node* node : control_ret_nodes) {
control_ret_node_names.push_back(node->name());
}
const std::string init_node_name =
GetNodeName(control_ret_node_names, kTfSavedModelInitializerInitType);
TF_ASSIGN_OR_RETURN(const std::optional<SaverDef> saver_def,
CreateSaverDef(control_ret_node_names, graph_def));
return CreateExportedModelFromGraphDef(std::move(graph_def), init_node_name,
checkpoint_dir, std::move(saver_def),
function_aliases, asset_file_defs);
}
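// Runs the export pass pipeline, optionally unfreezing constants into
// checkpointed variables first, and returns the AssetFileDefs produced by
// converting asset arguments.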
absl::StatusOr<SmallVector<AssetFileDef>> RunExportPasses(
const ExportOptions& export_opts, MLIRContext& ctx, ModuleOp module_op) {
if (export_opts.unfreeze_constants) {
TF_RETURN_IF_ERROR(UnfreezeConstantsAndSaveVariables(
export_opts.checkpoint_dir, ctx, module_op));
LOG(INFO) << "Unfrozen constants and saved variables to checkpoint file: "
<< export_opts.checkpoint_dir;
}
TF_RETURN_IF_ERROR(RunPasses(
export_opts.debug_name,
[dup_constants = export_opts.duplicate_shape_determining_constants](
PassManager& pm) { AddExportPasses(pm, dup_constants); },
ctx, module_op));
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
quant::ConvertAssetArgs(module_op);
if (failed(asset_file_defs)) {
return absl::InternalError("Failed to convert asset args.");
}
return *asset_file_defs;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.h"
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace mlir::quant::stablehlo {
namespace {
using ::tensorflow::AssetFileDef;
using ::tensorflow::GraphDef;
using ::tensorflow::NodeDef;
using ::tensorflow::SaverDef;
using ::tensorflow::quantization::ExportedModel;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
TEST(CreateExportedModelTest, CreateExportedModelBasicFieldsSet) {
GraphDef graph_def{};
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(node { name: "foo" })pb", &graph_def));
  const ExportedModel exported_model = CreateExportedModelFromGraphDef(
      std::move(graph_def), "init_node_name", "checkpoint_dir",
      /*saver_def=*/std::nullopt,
      /*function_aliases=*/{}, /*asset_file_defs=*/{});
ASSERT_THAT(exported_model.graph_def().node(), SizeIs(1));
EXPECT_THAT(exported_model.graph_def().node()[0].name(), StrEq("foo"));
EXPECT_THAT(exported_model.init_node_name(), StrEq("init_node_name"));
EXPECT_THAT(exported_model.checkpoint_dir(), StrEq("checkpoint_dir"));
EXPECT_FALSE(exported_model.has_saver_def());
EXPECT_THAT(exported_model.function_aliases(), IsEmpty());
EXPECT_THAT(exported_model.asset_file_defs(), IsEmpty());
}
TEST(CreateExportedModelTest, CreateExportedModelWithAddedFunctionAliases) {
  const ExportedModel exported_model = CreateExportedModelFromGraphDef(
      GraphDef(), /*init_node_name=*/"", /*checkpoint_dir=*/"",
      /*saver_def=*/std::nullopt,
      /*function_aliases=*/{{"func1", "alias1"}, {"func2", "alias2"}},
      /*asset_file_defs=*/{});
ASSERT_THAT(exported_model.function_aliases(), SizeIs(2));
EXPECT_TRUE(exported_model.function_aliases().contains("func1"));
EXPECT_THAT(exported_model.function_aliases().at("func1"), StrEq("alias1"));
EXPECT_TRUE(exported_model.function_aliases().contains("func2"));
EXPECT_THAT(exported_model.function_aliases().at("func2"), StrEq("alias2"));
}
TEST(CreateExportedModelTest, CreateExportedModelWithAddedAssetFileDefs) {
AssetFileDef asset1;
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(filename: "fname1")pb", &asset1));
AssetFileDef asset2;
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(filename: "fname2")pb", &asset2));
  const ExportedModel exported_model = CreateExportedModelFromGraphDef(
      GraphDef(), /*init_node_name=*/"", /*checkpoint_dir=*/"",
      /*saver_def=*/std::nullopt, /*function_aliases=*/{},
      /*asset_file_defs=*/{asset1, asset2});
ASSERT_THAT(exported_model.asset_file_defs(), SizeIs(2));
EXPECT_THAT(exported_model.asset_file_defs()[0].filename(), StrEq("fname1"));
EXPECT_THAT(exported_model.asset_file_defs()[1].filename(), StrEq("fname2"));
}
TEST(CreateExportedModelTest, CreateExportedModelWithAddedSaverDef) {
SaverDef saver_def;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(filename_tensor_name: "my_file")pb", &saver_def));
  const ExportedModel exported_model = CreateExportedModelFromGraphDef(
      GraphDef(), /*init_node_name=*/"", /*checkpoint_dir=*/"", saver_def,
      /*function_aliases=*/{}, /*asset_file_defs=*/{});
EXPECT_THAT(exported_model.saver_def().filename_tensor_name(), "my_file");
}
TEST(CreateSaverDefTest, CreateValidSaverDef) {
GraphDef graph_def;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(node {
name: "foo",
op: "_Arg",
attr {
key: "tf_saved_model.index_path",
value { list { s: "__tf_file_prefix" } }
}
})pb",
&graph_def));
const std::vector<std::string> control_ret_node_names = {
"restore_op_0", "tf_quant__save_op_0"};
TF_ASSERT_OK_AND_ASSIGN(const std::optional<SaverDef> saver_def,
CreateSaverDef(control_ret_node_names, graph_def));
ASSERT_NE(saver_def, std::nullopt);
EXPECT_THAT(saver_def->version(), SaverDef::V2);
EXPECT_THAT(saver_def->restore_op_name(), "restore_op_0");
EXPECT_THAT(saver_def->filename_tensor_name(), "foo:0");
EXPECT_THAT(saver_def->save_tensor_name(), "tf_quant__save_op_0:0");
}
TEST(CreateSaverDefTest, ReturnsNulloptIfNoSaverDefRelatedNodesExist) {
TF_ASSERT_OK_AND_ASSIGN(
const std::optional<SaverDef> saver_def,
      CreateSaverDef(/*control_ret_node_names=*/{}, GraphDef()));
EXPECT_EQ(saver_def, std::nullopt);
}
TEST(CreateSaverDefTest, ReturnsErrorStatusIfSaverDefNodesPartiallyExist) {
GraphDef graph_def;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(node { name: "foo", op: "_Arg" })pb", &graph_def));
const std::vector<std::string> control_ret_node_names = {
"restore_op_0", "tf_quant__save_op_0"};
const absl::StatusOr<std::optional<SaverDef>> saver_def =
CreateSaverDef(control_ret_node_names, graph_def);
EXPECT_THAT(
saver_def,
StatusIs(
absl::StatusCode::kInternal,
HasSubstr(
"should be either all empty strings or all non-empty strings")));
}
using ConvertMlirModuleToExportedModelTest =
::mlir::quant::QuantizationTestBase;
TEST_F(ConvertMlirModuleToExportedModelTest, SimpleGraphDefSet) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func @main(%arg: tensor<1x2xf32> {tf_saved_model.index_path = ["input_tensor:0"]}) -> (tensor<1x2xf32> {tf_saved_model.index_path = ["output_tensor:0"]}) attributes {tf.entry_function = {inputs = "input_tensor:0", outputs = "output_tensor:0"}, tf_saved_model.exported_names = ["main"]} {
%0 = tf_executor.graph {
tf_executor.fetch %arg : tensor<1x2xf32>
}
return %0 : tensor<1x2xf32>
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
      ConvertMlirModuleToExportedModel(*module_op, /*checkpoint_dir=*/"",
                                       /*function_aliases=*/{},
                                       /*asset_file_defs=*/{});
ASSERT_THAT(exported_model, IsOk());
ASSERT_THAT(exported_model->graph_def().node(), SizeIs(2));
const auto arg_node_itr =
llvm::find_if(exported_model->graph_def().node(),
[](const NodeDef& node) { return node.op() == "_Arg"; });
ASSERT_NE(arg_node_itr, exported_model->graph_def().node().end());
EXPECT_THAT(arg_node_itr->name(), StrEq("input_tensor"));
ASSERT_TRUE(arg_node_itr->attr().contains("tf_saved_model.index_path"));
ASSERT_THAT(arg_node_itr->attr().at("tf_saved_model.index_path").list().s(),
SizeIs(1));
EXPECT_THAT(
arg_node_itr->attr().at("tf_saved_model.index_path").list().s()[0],
StrEq("input_tensor:0"));
const auto retval_node_itr =
llvm::find_if(exported_model->graph_def().node(),
[](const NodeDef& node) { return node.op() == "_Retval"; });
ASSERT_NE(retval_node_itr, exported_model->graph_def().node().end());
EXPECT_THAT(retval_node_itr->name(), StrEq("output_tensor"));
ASSERT_TRUE(retval_node_itr->attr().contains("tf_saved_model.index_path"));
ASSERT_THAT(
retval_node_itr->attr().at("tf_saved_model.index_path").list().s(),
SizeIs(1));
EXPECT_THAT(
retval_node_itr->attr().at("tf_saved_model.index_path").list().s()[0],
StrEq("output_tensor:0"));
}
TEST_F(ConvertMlirModuleToExportedModelTest, CheckpointDirSet) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func @main() -> () attributes {tf_saved_model.exported_names = ["main"]} {
tf_executor.graph {
tf_executor.fetch
}
return
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
      ConvertMlirModuleToExportedModel(*module_op, "my_checkpoint_dir",
                                       /*function_aliases=*/{},
                                       /*asset_file_defs=*/{});
ASSERT_THAT(exported_model, IsOk());
EXPECT_THAT(exported_model->checkpoint_dir(), StrEq("my_checkpoint_dir"));
}
TEST_F(ConvertMlirModuleToExportedModelTest, FunctionAliasesSet) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func private @function_1() -> () attributes {tf._original_func_name = "__func_1"} {
tf_executor.graph {
%control_0 = tf_executor.island wraps "tf.NoOp"() : () -> ()
}
return
}
func.func private @function_2() -> () attributes {tf._original_func_name = "__func_2"} {
tf_executor.graph {
%control_0 = tf_executor.island wraps "tf.NoOp"() : () -> ()
}
return
}
func.func @main() -> () attributes {tf_saved_model.exported_names = ["main"]} {
tf_executor.graph {
%control_0 = tf_executor.island wraps "tf.PartitionedCall"() <{config = "", config_proto = "", executor_type = "", f = @function_1}> : () -> ()
%control_1 = tf_executor.island wraps "tf.PartitionedCall"() <{config = "", config_proto = "", executor_type = "", f = @function_2}> : () -> ()
tf_executor.fetch %control_0, %control_1 : !tf_executor.control, !tf_executor.control
}
return
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(
          *module_op, /*checkpoint_dir=*/"",
          /*function_aliases=*/{{"alias_1", "function_1"},
                                {"alias_2", "function_2"}},
          /*asset_file_defs=*/{});
ASSERT_THAT(exported_model, IsOk());
ASSERT_THAT(exported_model->function_aliases(), SizeIs(2));
EXPECT_THAT(exported_model->function_aliases().at("alias_1"),
StrEq("function_1"));
EXPECT_THAT(exported_model->function_aliases().at("alias_2"),
StrEq("function_2"));
}
TEST_F(ConvertMlirModuleToExportedModelTest, AssetFileDefSet) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func @main() -> () attributes {tf_saved_model.exported_names = ["main"]} {
tf_executor.graph {
tf_executor.fetch
}
return
}
}
)mlir");
ASSERT_TRUE(module_op);
AssetFileDef asset_file_def{};
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(filename: "vocab_file.txt",
tensor_info { name: "arg_0:0" })pb",
&asset_file_def));
const std::vector<AssetFileDef> asset_file_defs = {asset_file_def};
const absl::StatusOr<ExportedModel> exported_model =
      ConvertMlirModuleToExportedModel(*module_op, /*checkpoint_dir=*/"",
                                       /*function_aliases=*/{},
                                       asset_file_defs);
ASSERT_THAT(exported_model, IsOk());
ASSERT_THAT(exported_model->asset_file_defs(), SizeIs(1));
EXPECT_THAT(exported_model->asset_file_defs()[0].filename(),
StrEq("vocab_file.txt"));
EXPECT_THAT(exported_model->asset_file_defs()[0].tensor_info().name(),
StrEq("arg_0:0"));
}
TEST_F(ConvertMlirModuleToExportedModelTest,
InitNodeNameSetToLocOfControlOutput) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() <{initializers = []}> : () -> ()
"tf_saved_model.asset"() <{filename = "assets/vocab_file.txt", sym_name = "__tf_saved_model_asset0_vocab_file.txt"}> : () -> ()
func.func @main(%arg1: tensor<!tf_type.string> {tf_saved_model.index_path = ["arg_0:0"]}) -> (tensor<1x2xf32> {tf_saved_model.index_path = ["output:0"]}) attributes {tf.entry_function = {inputs = "arg_0:0", outputs = "output:0"}, tf_saved_model.exported_names = ["main"]} {
%0 = tf_executor.graph {
%o_0, %c_0 = tf_executor.island wraps "tf.Const"() <{value = dense<1.0> : tensor<1x2xf32>}> : () -> tensor<1x2xf32>
%o, %c = tf_executor.island wraps "tf.HashTableV2"() <{container = "", key_dtype = !tf_type.string, shared_name = "vocab_file.txt", use_node_name_sharing = false, value_dtype = i64}> {device = ""} : () -> tensor<!tf_type.resource>
%c_9 = tf_executor.island wraps "tf.InitializeTableFromTextFileV2"(%o, %arg1) <{delimiter = "\09", key_index = -2 : i64, value_index = -1 : i64, vocab_size = -1 : i64}> {_has_manual_control_dependencies = true, device = ""} : (tensor<!tf_type.resource>, tensor<!tf_type.string>) -> ()
%c_10 = tf_executor.island(%c_9) wraps "tf.NoOp"() : () -> () loc("init_op_init_all_tables")
tf_executor.fetch %o_0, %c_10 : tensor<1x2xf32>, !tf_executor.control
}
return %0 : tensor<1x2xf32>
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
      ConvertMlirModuleToExportedModel(*module_op, /*checkpoint_dir=*/"",
                                       /*function_aliases=*/{},
                                       /*asset_file_defs=*/{});
ASSERT_THAT(exported_model, IsOk());
EXPECT_THAT(exported_model->init_node_name(),
StrEq("init_op_init_all_tables"));
const auto init_node_itr = llvm::find_if(
exported_model->graph_def().node(), [](const NodeDef& node) {
return node.name() == "init_op_init_all_tables";
});
ASSERT_NE(init_node_itr, exported_model->graph_def().node().end());
EXPECT_THAT(init_node_itr->op(), StrEq("NoOp"));
ASSERT_THAT(init_node_itr->input(), SizeIs(1));
EXPECT_THAT(init_node_itr->input()[0],
StrEq("^tf.InitializeTableFromTextFileV2"));
}
TEST_F(ConvertMlirModuleToExportedModelTest, InitNodeNotSetIfLocNameMismatch) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() <{initializers = []}> : () -> ()
"tf_saved_model.asset"() <{filename = "assets/vocab_file.txt", sym_name = "__tf_saved_model_asset0_vocab_file.txt"}> : () -> ()
func.func @main(%arg1: tensor<!tf_type.string> {tf_saved_model.index_path = ["arg_0:0"]}) -> (tensor<1x2xf32> {tf_saved_model.index_path = ["output:0"]}) attributes {tf.entry_function = {inputs = "arg_0:0", outputs = "output:0"}, tf_saved_model.exported_names = ["main"]} {
%0 = tf_executor.graph {
%output_0, %control_0 = tf_executor.island wraps "tf.Const"() <{value = dense<1.0> : tensor<1x2xf32>}> : () -> tensor<1x2xf32>
%output_1, %control_1 = tf_executor.island wraps "tf.HashTableV2"() <{container = "", key_dtype = !tf_type.string, shared_name = "vocab_file.txt", use_node_name_sharing = false, value_dtype = i64}> {device = ""} : () -> tensor<!tf_type.resource>
%control_2 = tf_executor.island wraps "tf.InitializeTableFromTextFileV2"(%output_1, %arg1) <{delimiter = "\09", key_index = -2 : i64, value_index = -1 : i64, vocab_size = -1 : i64}> {_has_manual_control_dependencies = true, device = ""} : (tensor<!tf_type.resource>, tensor<!tf_type.string>) -> ()
%control_3 = tf_executor.island(%control_2) wraps "tf.NoOp"() : () -> () loc("init_ok")
tf_executor.fetch %output_0, %control_3 : tensor<1x2xf32>, !tf_executor.control
}
return %0 : tensor<1x2xf32>
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
      ConvertMlirModuleToExportedModel(*module_op, /*checkpoint_dir=*/"",
                                       /*function_aliases=*/{},
                                       /*asset_file_defs=*/{});
ASSERT_THAT(exported_model, IsOk());
EXPECT_THAT(exported_model->init_node_name(), IsEmpty());
}
TEST_F(ConvertMlirModuleToExportedModelTest,
ConversionFailureWhenNoMainFunction) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func @not_main() -> () attributes {tf_saved_model.exported_names = ["not_main"]} {
tf_executor.graph {
tf_executor.fetch
}
return
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
      ConvertMlirModuleToExportedModel(*module_op, "my_checkpoint_dir",
                                       /*function_aliases=*/{},
                                       /*asset_file_defs=*/{});
EXPECT_THAT(exported_model,
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("entry function `main` must be present")));
}
}
} |
1,212 | cpp | tensorflow/tensorflow | pre_calibration | tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_PRE_CALIBRATION_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_PRE_CALIBRATION_H_
#include "absl/base/nullability.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/component.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
namespace mlir::quant::stablehlo {
class PreCalibrationComponent : public Component {
public:
static constexpr absl::string_view kName = "quant_ptq_pre_calibration";
explicit PreCalibrationComponent(absl::Nonnull<MLIRContext*> ctx);
absl::StatusOr<ModuleOp> Run(
ModuleOp,
const ::stablehlo::quantization::QuantizationConfig& config) override;
private:
absl::Nonnull<MLIRContext*> ctx_;
};
}
#endif
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration.h"
#include "absl/base/nullability.h"
#include "absl/log/die_if_null.h"
#include "absl/status/statusor.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/run_passes.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tsl/platform/errors.h"
namespace mlir::quant::stablehlo {
using ::stablehlo::quantization::QuantizationConfig;
using ::tensorflow::quantization::RunPasses;
PreCalibrationComponent::PreCalibrationComponent(
absl::Nonnull<MLIRContext*> ctx)
: ctx_(ABSL_DIE_IF_NULL(ctx)) {}
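// Runs the pre-calibration phase of post-training quantization: quantizable
// patterns are lifted into composite functions (XlaCallModule ops tagged as
// fully quantizable) and CustomAggregator ops are inserted to collect
// calibration statistics at runtime.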
absl::StatusOr<ModuleOp> PreCalibrationComponent::Run(
ModuleOp module_op, const QuantizationConfig& config) {
TF_RETURN_IF_ERROR(RunPasses(
kName,
[&config](PassManager& pm) {
AddPreCalibrationPasses(pm, config.calibration_options(),
config.specs(), config.debugger_config());
},
*ctx_, module_op));
return module_op;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration.h"
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/passes/tf_quant_ops.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::ExpandPresets;
using ::stablehlo::quantization::PopulateDefaults;
using ::stablehlo::quantization::QuantizationConfig;
using ::testing::Contains;
using ::testing::SizeIs;
using ::testing::StartsWith;
using ::testing::StrEq;
using ::tsl::testing::IsOk;
MATCHER_P(HasSymName, name, "") {
auto non_const_arg = const_cast<std::remove_const_t<decltype(arg)>>(arg);
*result_listener << "where the name is " << non_const_arg.getSymName().str();
return non_const_arg.getSymName() == name;
}
MATCHER_P2(HasStringAttr, name, value_matcher,
           absl::StrCat(negation ? "doesn't have " : "has ",
                        "string attribute: ", name, ", with the desired value")) {
auto non_const_arg = const_cast<std::remove_const_t<decltype(arg)>>(arg);
return non_const_arg->template hasAttrOfType<StringAttr>(name) &&
ExplainMatchResult(
value_matcher,
non_const_arg->template getAttrOfType<StringAttr>(name).str(),
result_listener);
}
MATCHER_P2(HasSymNameAttr, name, value_matcher,
           absl::StrCat(negation ? "doesn't have " : "has ",
                        "symbol reference attribute: ", name,
                        ", with the desired value")) {
auto non_const_arg = const_cast<std::remove_const_t<decltype(arg)>>(arg);
return non_const_arg->template hasAttrOfType<FlatSymbolRefAttr>(name) &&
ExplainMatchResult(
value_matcher,
non_const_arg->template getAttrOfType<FlatSymbolRefAttr>(name)
.getValue()
.str(),
result_listener);
}
using PreCalibrationComponentTest = ::mlir::quant::QuantizationTestBase;
TEST_F(PreCalibrationComponentTest,
HasCustomAggregatorOpAndQuantizableFuncForSimpleDotGeneral) {
PreCalibrationComponent component(ctx_.get());
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {} {
func.func @main(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> attributes {} {
%0 = stablehlo.constant dense<1.0> : tensor<4x3xf32>
%1 = stablehlo.dot_general %arg0, %0, contracting_dims = [1] x [0], precision = [DEFAULT, DEFAULT] : (tensor<1x4xf32>, tensor<4x3xf32>) -> tensor<1x3xf32>
return %1 : tensor<1x3xf32>
}
}
)mlir");
ASSERT_TRUE(module_op);
QuantizationConfig quantization_config{};
quantization_config.mutable_static_range_ptq_preset();
quantization_config = ExpandPresets(PopulateDefaults(quantization_config));
absl::StatusOr<ModuleOp> pre_calibration_result =
component.Run(*module_op, quantization_config);
EXPECT_THAT(pre_calibration_result, IsOk());
SmallVector<func::FuncOp> func_ops;
for (auto func_op : pre_calibration_result->getOps<func::FuncOp>()) {
func_ops.push_back(func_op);
}
ASSERT_THAT(func_ops, SizeIs(2));
EXPECT_THAT(func_ops, Contains(HasSymName("main")));
EXPECT_THAT(func_ops, Contains(HasSymName("composite_dot_general_fn_1")));
SmallVector<TF::XlaCallModuleOp> xla_call_module_ops;
for (auto xla_call_module_op : func_ops[0].getOps<TF::XlaCallModuleOp>()) {
xla_call_module_ops.push_back(xla_call_module_op);
}
ASSERT_THAT(xla_call_module_ops, SizeIs(1));
auto xla_call_module_op = xla_call_module_ops[0];
EXPECT_THAT(xla_call_module_op,
HasStringAttr("_tfl_quant_trait", StrEq("fully_quantizable")));
EXPECT_THAT(xla_call_module_op,
HasSymNameAttr("_entry_function",
StartsWith("composite_dot_general_fn")));
EXPECT_THAT(xla_call_module_op,
HasStringAttr("_original_entry_function",
StartsWith("composite_dot_general_fn")));
SmallVector<TF::CustomAggregatorOp> custom_aggregator_ops;
for (auto custom_aggregator_op :
func_ops[0].getOps<TF::CustomAggregatorOp>()) {
custom_aggregator_ops.push_back(custom_aggregator_op);
}
EXPECT_THAT(custom_aggregator_ops, SizeIs(2));
}
}
} |
1,213 | cpp | tensorflow/tensorflow | config | tensorflow/core/tfrt/graph_executor/config.cc | tensorflow/core/tfrt/graph_executor/config_test.cc | #define CFLAG_FNO_COMMON 1
#define CFLAG_FVISIBILITY_HIDDEN 1
#define CFLAG_FWRAPV 1
#define CFLAG_PEDANTIC 1
#define CFLAG_U_STRICT_ANSI 1
#define CFLAG_W 1
#define CFLAG_WALL 1
#define CFLAG_WERROR_ATTRIBUTES 1
#define CFLAG_WERROR_COMMENT 1
#define CFLAG_WERROR_IMPLICIT 1
#define CFLAG_WERROR_MISSING_BRACES 1
#define CFLAG_WERROR_MISSING_DECLARATIONS 1
#define CFLAG_WERROR_MISSING_PROTOTYPES 1
#define CFLAG_WERROR_POINTER_ARITH 1
#define CFLAG_WERROR_RETURN_TYPE 1
#define CFLAG_WERROR_TRIGRAPHS 1
#define CFLAG_WERROR_VLA 1
#define CFLAG_WLONG_LONG 1
#define CFLAG_WSHIFT_NEGATIVE_VALUE 1
#define HAVE_ACCESS 1
#define HAVE_DECL_STRCASECMP 1
#define HAVE_DECL_STRICMP 0
#define HAVE_DECL_STRLCPY 0
#define HAVE_DECL_STRNCASECMP 1
#define HAVE_DECL_STRNICMP 0
#define HAVE_DECL_STRNLEN 1
#define HAVE_DECL_STRRCHRNUL 0
#define HAVE_DECL_STRSEP 1
#define HAVE_FACCESSAT 1
#define HAVE_FCNTL_H 1
#define HAVE_FILENO 1
#define HAVE_FSEEKO 1
#define HAVE_FSTAT 1
#define HAVE_FTRUNCATE 1
#define HAVE_FUNC_ATTRIBUTE_ALLOC_SIZE 1
#define HAVE_FUNC_ATTRIBUTE_COLD 1
#define HAVE_FUNC_ATTRIBUTE_CONST 1
#define HAVE_FUNC_ATTRIBUTE_FORMAT 1
#define HAVE_FUNC_ATTRIBUTE_MALLOC 1
#define HAVE_FUNC_ATTRIBUTE_NORETURN 1
#define HAVE_FUNC_ATTRIBUTE_PURE 1
#define HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL 1
#define HAVE_FUNC_ATTRIBUTE_SENTINEL 1
#define HAVE_GETGID 1
#define HAVE_GETPAGESIZE 1
#define HAVE_GETUID 1
#define HAVE_INTTYPES_H 1
#define HAVE_MEMORY_H 1
#define HAVE_MMAP 1
#define HAVE_PATHCONF 1
#define HAVE_REALPATH 1
#define HAVE_SNPRINTF 1
#define HAVE_STAT 1
#define HAVE_STDBOOL_H 1
#define HAVE_STDC_INLINE 1
#define HAVE_STDINT_H 1
#define HAVE_STDLIB_H 1
#define HAVE_STDNORETURN_H 1
#define HAVE_STRCASECMP 1
#define HAVE_STRINGS_H 1
#define HAVE_STRING_H 1
#define HAVE_STRNCASECMP 1
#define HAVE_STRNLEN 1
#define HAVE_STRSEP 1
#define HAVE_STRUCT_STAT 1
#define HAVE_SYSCONF 1
#define HAVE_SYS_MMAN_H 1
#define HAVE_SYS_PARAM_H 1
#define HAVE_SYS_STAT_H 1
#define HAVE_SYS_TYPES_H 1
#define HAVE_UINTPTR_T 1
#define HAVE_UNISTD_H 1
#define HAVE_VSNPRINTF 1
#define HAVE__BOOL 1
#define HAVE___BUILTIN_BSWAP16 1
#define HAVE___BUILTIN_BSWAP32 1
#define HAVE___BUILTIN_BSWAP64 1
#define HAVE___BUILTIN_CLZ 1
#define HAVE___BUILTIN_CLZL 1
#define HAVE___BUILTIN_CLZLL 1
#define HAVE___BUILTIN_CONSTANT_P 1
#define HAVE___BUILTIN_EXPECT 1
#define PACKAGE_BUGREPORT ""
#define PACKAGE_NAME ""
#define PACKAGE_STRING ""
#define PACKAGE_TARNAME ""
#define PACKAGE_URL ""
#define PACKAGE_VERSION ""
#define STDC_HEADERS 1
#ifndef _ALL_SOURCE
#define _ALL_SOURCE 1
#endif
#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
#endif
#ifndef _POSIX_PTHREAD_SEMANTICS
#define _POSIX_PTHREAD_SEMANTICS 1
#endif
#ifndef _TANDEM_SOURCE
#define _TANDEM_SOURCE 1
#endif
#ifndef __EXTENSIONS__
#define __EXTENSIONS__ 1
#endif
#define WORDS_LITTLEENDIAN 1
#ifndef _DARWIN_USE_64_BIT_INODE
#define _DARWIN_USE_64_BIT_INODE 1
#endif
#ifndef __cplusplus
#endif
#define restrict __restrict
#if defined __SUNPRO_CC && !defined __RESTRICT
#define _Restrict
#define __restrict__
#endif
#include "tensorflow/core/tfrt/graph_executor/config.h"
#include <string>
#include <utility>
namespace tensorflow {
namespace tfrt_stub {
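// Builds a RuntimeConfig from its proto form, indexing every `Any` entry by
// its fully qualified message name so later lookups by type are O(1).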
absl::StatusOr<RuntimeConfig> RuntimeConfig::CreateFromProto(
RuntimeConfigProto proto) {
RuntimeConfig model_config;
model_config.proto_ = std::move(proto);
size_t i = 0;
for (const auto& any : model_config.proto_.config()) {
std::string full_name;
if (!::google::protobuf::Any::ParseAnyTypeUrl(any.type_url(), &full_name)) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid Any proto type url: ", any.type_url()));
}
model_config.map_[full_name] = i++;
}
return model_config;
}
}
} | #include "tensorflow/core/tfrt/graph_executor/config.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/tfrt/graph_executor/config.pb.h"
#include "tensorflow/core/tfrt/graph_executor/test_config.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
TEST(ConfigTest, Basic) {
TestConfig1 expected_test_config1;
expected_test_config1.set_tag("test config1");
TestConfig2 expected_test_config2;
expected_test_config2.set_tag("test config2");
RuntimeConfig runtime_config;
TF_ASSERT_OK(runtime_config.Add(expected_test_config2));
TF_ASSERT_OK(runtime_config.Add(expected_test_config1));
auto test_config1 = runtime_config.Get<TestConfig1>();
TF_ASSERT_OK(test_config1.status());
auto test_config2 = runtime_config.Get<TestConfig2>();
TF_ASSERT_OK(test_config2.status());
EXPECT_EQ(test_config1->tag(), "test config1");
EXPECT_EQ(test_config2->tag(), "test config2");
}
TEST(ConfigTest, Load) {
TestConfig1 expected_test_config1;
expected_test_config1.set_tag("test config1");
TestConfig2 expected_test_config2;
expected_test_config2.set_tag("test config2");
RuntimeConfigProto runtime_config_proto;
runtime_config_proto.add_config()->PackFrom(expected_test_config1);
runtime_config_proto.add_config()->PackFrom(expected_test_config2);
TF_ASSERT_OK_AND_ASSIGN(RuntimeConfig runtime_config,
RuntimeConfig::CreateFromProto(runtime_config_proto));
auto test_config1 = runtime_config.Get<TestConfig1>();
TF_ASSERT_OK(test_config1.status());
auto test_config2 = runtime_config.Get<TestConfig2>();
TF_ASSERT_OK(test_config2.status());
EXPECT_EQ(test_config1->tag(), "test config1");
EXPECT_EQ(test_config2->tag(), "test config2");
}
TEST(ConfigTest, NotFound) {
TestConfig1 expected_test_config1;
expected_test_config1.set_tag("test config1");
RuntimeConfigProto runtime_config_proto;
runtime_config_proto.add_config()->PackFrom(expected_test_config1);
TF_ASSERT_OK_AND_ASSIGN(RuntimeConfig runtime_config,
RuntimeConfig::CreateFromProto(runtime_config_proto));
EXPECT_THAT(runtime_config.Get<TestConfig2>(),
::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
}
TEST(ConfigTest, Duplicate) {
TestConfig1 expected_test_config1;
expected_test_config1.set_tag("test config1");
RuntimeConfig runtime_config;
TF_ASSERT_OK(runtime_config.Add(expected_test_config1));
EXPECT_THAT(runtime_config.Add(expected_test_config1),
::tsl::testing::StatusIs(absl::StatusCode::kAlreadyExists));
}
}
}
} |
1,214 | cpp | tensorflow/tensorflow | saved_model_import | tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_SAVED_MODEL_IMPORT_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_SAVED_MODEL_IMPORT_H_
#include <string>
#include <unordered_set>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
namespace mlir::quant::stablehlo {
using ImportedMlirModuleOp =
std::pair<OwningOpRef<ModuleOp>,
std::unique_ptr<::tensorflow::SavedModelBundle>>;
absl::StatusOr<ImportedMlirModuleOp> SavedModelToMlirModuleOp(
absl::string_view saved_model_path,
const std::unordered_set<std::string>& tags,
const std::vector<std::string>& signature_keys,
MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND);
absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
GetFunctionAliases(absl::string_view saved_model_path,
const std::unordered_set<std::string>& tags);
void UpdateFunctionAliases(
absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
ModuleOp module_op);
absl::StatusOr<OwningOpRef<ModuleOp>> ImportSavedModel(
absl::string_view saved_model_path,
const std::vector<std::string>& signature_keys,
const std::unordered_set<std::string>& tags,
const ::stablehlo::quantization::QuantizationConfig& quantization_config,
absl::string_view mlir_dump_file_prefix,
absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND);
}
#endif
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h"
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantize_preprocess.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_import_options.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace mlir::quant::stablehlo {
using ::stablehlo::quantization::QuantizationConfig;
using ::tensorflow::MLIRImportOptions;
using ::tensorflow::SavedModelBundle;
using ::tensorflow::SavedModelSignatureDefsToMlirImport;
using ::tensorflow::quantization::PreprocessAndFreezeGraph;
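// Imports the SavedModel at `saved_model_path` into an MLIR module, keeping
// variables in the initializers (lifting is deferred) and upgrading legacy
// control-flow constructs during import.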
absl::StatusOr<ImportedMlirModuleOp> SavedModelToMlirModuleOp(
const absl::string_view saved_model_path,
const std::unordered_set<std::string>& tags,
const std::vector<std::string>& signature_keys,
MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND) {
MLIRImportOptions import_options;
import_options.upgrade_legacy = true;
import_options.lift_variables = false;
import_options.include_variables_in_initializers = true;
auto bundle = std::make_unique<SavedModelBundle>();
std::vector<std::string> exported_names = signature_keys;
absl::StatusOr<OwningOpRef<ModuleOp>> module_op =
SavedModelSignatureDefsToMlirImport(saved_model_path, tags,
absl::MakeSpan(exported_names), &ctx,
import_options, &bundle);
if (!module_op.status().ok()) {
return absl::InternalError(absl::StrCat("Failed to import SavedModel: ",
module_op.status().ToString()));
}
return std::make_pair(std::move(*module_op), std::move(bundle));
}
absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
GetFunctionAliases(absl::string_view saved_model_path,
const std::unordered_set<std::string>& tags) {
tensorflow::MetaGraphDef meta_graph;
TF_RETURN_IF_ERROR(tensorflow::ReadMetaGraphDefFromSavedModel(
saved_model_path, tags, &meta_graph));
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases(
meta_graph.meta_info_def().function_aliases().begin(),
meta_graph.meta_info_def().function_aliases().end());
return function_aliases;
}
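// Re-keys `function_aliases` from the original TF function names to the
// (possibly renamed) MLIR function names via the `tf._original_func_name`
// attribute, and drops aliases whose functions no longer exist in the module.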
void UpdateFunctionAliases(
absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
ModuleOp module_op) {
absl::flat_hash_set<FunctionName> existing_func_names;
module_op->walk([&](func::FuncOp func_op) {
FunctionName func_name = func_op.getSymName().str();
existing_func_names.insert(func_name);
auto original_func_name =
func_op->getAttrOfType<StringAttr>("tf._original_func_name");
if (original_func_name) {
if (auto alias_itr = function_aliases.find(original_func_name.str());
alias_itr != function_aliases.end()) {
const FunctionAlias alias = alias_itr->second;
function_aliases[func_name] = alias;
}
}
});
absl::erase_if(function_aliases, [&existing_func_names](const auto& item) {
return !existing_func_names.contains(item.first);
});
}
absl::StatusOr<OwningOpRef<ModuleOp>> ImportSavedModel(
const absl::string_view saved_model_path,
const std::vector<std::string>& signature_keys,
const std::unordered_set<std::string>& tags,
const QuantizationConfig& quantization_config,
const absl::string_view mlir_dump_file_prefix,
absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND) {
TF_ASSIGN_OR_RETURN(
ImportedMlirModuleOp imported_module,
SavedModelToMlirModuleOp(saved_model_path, tags, signature_keys, ctx));
auto [module_op, saved_model_bundle] = std::move(imported_module);
UpdateFunctionAliases(function_aliases, *module_op);
absl::flat_hash_set<std::string> aliased_function_names;
absl::c_for_each(function_aliases, [&](const auto& aliases) {
return aliased_function_names.insert(aliases.first);
});
TF_RETURN_IF_ERROR(PreprocessAndFreezeGraph(
      mlir_dump_file_prefix, /*is_inliner_run=*/true,
      /*noinline_functions=*/aliased_function_names, *module_op, &ctx,
      saved_model_bundle == nullptr ? nullptr
                                    : saved_model_bundle->GetSession(),
      /*run_tf_to_stablehlo=*/true, /*deserialize_xla_call_module=*/false));
return std::move(module_op);
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
namespace mlir::quant::stablehlo {
namespace {
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using UpdateFunctionAliasesTest = ::mlir::quant::QuantizationTestBase;
TEST_F(UpdateFunctionAliasesTest, NoAliasesReturnsEmptyMap) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) attributes {tf._original_func_name = "main_original"} {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases;
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases, IsEmpty());
}
TEST_F(UpdateFunctionAliasesTest, AliasUpdatedByMlirFunctionName) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) attributes {tf._original_func_name = "main_original"} {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases{
{"main_original", "main_alias"}};
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases,
UnorderedElementsAre(Pair("main", "main_alias")));
}
TEST_F(UpdateFunctionAliasesTest, IgnoresUnmatchedFunctions) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) attributes {tf._original_func_name = "main_original"} {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases{
{"not_main", "not_main_alias"}};
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases, IsEmpty());
}
TEST_F(UpdateFunctionAliasesTest,
SkipsFunctionsWithNoOriginalFuncNameAttribute) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases{
{"main_original", "main_alias"}};
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases, IsEmpty());
}
TEST_F(UpdateFunctionAliasesTest, FunctionNameNotChanged) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main_original(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases{
{"main_original", "main_alias"}};
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases,
UnorderedElementsAre(Pair("main_original", "main_alias")));
}
}
} |
1,215 | cpp | tensorflow/tensorflow | representative_dataset | tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_CALIBRATION_REPRESENTATIVE_DATASET_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_CALIBRATION_REPRESENTATIVE_DATASET_H_
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
namespace stablehlo::quantization {
absl::StatusOr<absl::flat_hash_map<
std::string, tensorflow::quantization::RepresentativeDatasetFile>>
CreateRepresentativeDatasetFileMap(absl::Span<const RepresentativeDatasetConfig>
representative_dataset_configs);
}
#endif
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
namespace stablehlo::quantization {
using ::tensorflow::quantization::RepresentativeDatasetFile;
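// Maps each representative dataset config to its TFRecord file, keyed by
// signature key ("serving_default" when unspecified). Duplicate signature
// keys are rejected with an invalid-argument error.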
absl::StatusOr<absl::flat_hash_map<std::string, RepresentativeDatasetFile>>
CreateRepresentativeDatasetFileMap(absl::Span<const RepresentativeDatasetConfig>
representative_dataset_configs) {
absl::flat_hash_map<std::string, RepresentativeDatasetFile>
repr_dataset_file_map{};
for (const RepresentativeDatasetConfig& dataset_config :
representative_dataset_configs) {
RepresentativeDatasetFile repr_dataset_file;
repr_dataset_file.set_tfrecord_file_path(dataset_config.tf_record().path());
const std::string signature_key = dataset_config.has_signature_key()
? dataset_config.signature_key()
: "serving_default";
if (repr_dataset_file_map.contains(signature_key)) {
return absl::InvalidArgumentError(
absl::StrCat("RepresentativeDatasetConfig should not contain "
"duplicate signature key: ",
signature_key));
}
repr_dataset_file_map[signature_key] = std::move(repr_dataset_file);
}
return repr_dataset_file_map;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::RepresentativeDatasetConfig;
using ::tensorflow::quantization::RepresentativeDatasetFile;
using ::testing::Contains;
using ::testing::HasSubstr;
using ::testing::Key;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
TEST(CreateRepresentativeDatasetFileMapTest,
ConfigWithoutExplicitSignatureKeyMappedToServingDefault) {
std::vector<RepresentativeDatasetConfig> representative_dataset_configs;
RepresentativeDatasetConfig config{};
*(config.mutable_tf_record()->mutable_path()) = "test_path";
representative_dataset_configs.push_back(config);
const absl::StatusOr<
absl::flat_hash_map<std::string, RepresentativeDatasetFile>>
representative_dataset_file_map =
CreateRepresentativeDatasetFileMap(representative_dataset_configs);
ASSERT_THAT(representative_dataset_file_map, IsOk());
ASSERT_THAT(*representative_dataset_file_map, SizeIs(1));
EXPECT_THAT(*representative_dataset_file_map,
Contains(Key("serving_default")));
EXPECT_THAT(representative_dataset_file_map->at("serving_default")
.tfrecord_file_path(),
StrEq("test_path"));
}
TEST(CreateRepresentativeDatasetFileMapTest, ConfigWithExplicitSignatureKey) {
std::vector<RepresentativeDatasetConfig> representative_dataset_configs;
RepresentativeDatasetConfig config{};
config.set_signature_key("test_signature_key");
*(config.mutable_tf_record()->mutable_path()) = "test_path";
representative_dataset_configs.push_back(config);
const absl::StatusOr<
absl::flat_hash_map<std::string, RepresentativeDatasetFile>>
representative_dataset_file_map =
CreateRepresentativeDatasetFileMap(representative_dataset_configs);
ASSERT_THAT(representative_dataset_file_map, IsOk());
ASSERT_THAT(*representative_dataset_file_map, SizeIs(1));
EXPECT_THAT(*representative_dataset_file_map,
Contains(Key(StrEq("test_signature_key"))));
EXPECT_THAT(representative_dataset_file_map->at("test_signature_key")
.tfrecord_file_path(),
StrEq("test_path"));
}
TEST(CreateRepresentativeDatasetFileMapTest,
ConfigWithDuplicateSignatureKeyReturnsInvalidArgumentError) {
std::vector<RepresentativeDatasetConfig> representative_dataset_configs;
RepresentativeDatasetConfig config_1{};
config_1.set_signature_key("serving_default");
*(config_1.mutable_tf_record()->mutable_path()) = "test_path_1";
representative_dataset_configs.push_back(config_1);
RepresentativeDatasetConfig config_2{};
*(config_2.mutable_tf_record()->mutable_path()) = "test_path_2";
representative_dataset_configs.push_back(config_2);
const absl::StatusOr<
absl::flat_hash_map<std::string, RepresentativeDatasetFile>>
representative_dataset_file_map =
CreateRepresentativeDatasetFileMap(representative_dataset_configs);
EXPECT_THAT(representative_dataset_file_map,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("duplicate signature key: serving_default")));
}
}
} |
1,216 | cpp | tensorflow/tensorflow | tf_to_uniform_attribute_utils | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_UTILS_TF_TO_UNIFORM_ATTRIBUTE_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_UTILS_TF_TO_UNIFORM_ATTRIBUTE_UTILS_H_
#include "llvm/ADT/StringMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/PatternMatch.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
namespace mlir::quant {
LogicalResult FillAttributesForUniformQuantizedDotOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
tensorflow::quantization::QuantizationMethod::PresetMethod
quantization_method,
bool enable_per_channel_quantization);
LogicalResult FillAttributesForUniformQuantizedConvolutionOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
tensorflow::quantization::QuantizationMethod::PresetMethod
quantization_method,
bool enable_per_channel_quantization);
LogicalResult FillAttributesForUniformQuantizedAddOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
tensorflow::quantization::QuantizationMethod::PresetMethod
quantization_method,
bool enable_per_channel_quantization);
LogicalResult FillAttributesForUniformQuantizedClipByValueOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
tensorflow::quantization::QuantizationMethod::PresetMethod
quantization_method,
bool enable_per_channel_quantization);
LogicalResult FillAttributesForUniformRequantizeOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
tensorflow::quantization::QuantizationMethod::PresetMethod
quantization_method,
bool enable_per_channel_quantization);
LogicalResult FillAttributesForUniformQuantizeOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
tensorflow::quantization::QuantizationMethod::PresetMethod
quantization_method,
bool enable_per_channel_quantization);
}
#endif
#include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.h"
#include <array>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/ops/uniform_op_quant_spec.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
namespace mlir::quant {
using QuantMethod = tensorflow::quantization::QuantizationMethod::PresetMethod;
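// Op categories; each determines which quantization attribute prefixes
// receive min/max bounds in FillQuantizationAttributes below.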
enum class OpType {
kDynamicRangeOp,
kUnaryOp,
kBinaryOp,
kQuantizationOp,
};
constexpr std::array<absl::string_view, 3> kQuantizationAxisAttrs = {
"input_quantization_axis", "quantization_axis", "rhs_quantization_axis"};
constexpr std::array<absl::string_view, 2> kSuffixes = {"_min_val", "_max_val"};
Attribute GetWindowStridesValue(
PatternRewriter& rewriter, llvm::StringMap<Attribute>& identifier_to_attr) {
ArrayAttr stride = mlir::dyn_cast<ArrayAttr>(identifier_to_attr["strides"]);
const int stride_h = mlir::cast<IntegerAttr>(stride[1]).getInt();
const int stride_w = mlir::cast<IntegerAttr>(stride[2]).getInt();
return rewriter.getI64ArrayAttr({stride_h, stride_w});
}
Attribute GetLhsDilationValue(PatternRewriter& rewriter,
llvm::StringMap<Attribute>& identifier_to_attr) {
return rewriter.getI64ArrayAttr({1, 1});
}
Attribute GetRhsDilationValue(PatternRewriter& rewriter,
llvm::StringMap<Attribute>& identifier_to_attr) {
ArrayAttr dilations =
mlir::dyn_cast<ArrayAttr>(identifier_to_attr["dilations"]);
const int dilation_h = mlir::cast<IntegerAttr>(dilations[1]).getInt();
const int dilation_w = mlir::cast<IntegerAttr>(dilations[2]).getInt();
return rewriter.getI64ArrayAttr({dilation_h, dilation_w});
}
Attribute GetPaddingValue(PatternRewriter& rewriter,
llvm::StringMap<Attribute>& identifier_to_attr) {
llvm::StringRef padding =
mlir::dyn_cast<StringAttr>(identifier_to_attr["padding"]).getValue();
return rewriter.getStringAttr(padding);
}
Attribute GetExplicitPaddingValue(
PatternRewriter& rewriter, llvm::StringMap<Attribute>& identifier_to_attr) {
ArrayAttr explicit_padding =
mlir::dyn_cast<ArrayAttr>(identifier_to_attr["explicit_paddings"]);
return explicit_padding;
}
Attribute GetDimensionNumbersValue(
PatternRewriter& rewriter, llvm::StringMap<Attribute>& identifier_to_attr) {
tensorflow::UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
if (!tensorflow::protobuf::TextFormat::ParseFromString(
R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers)) {
return rewriter.getStringAttr("");
}
return rewriter.getStringAttr(dimension_numbers.SerializeAsString());
}
Attribute GetBatchGroupCountValue(
PatternRewriter& rewriter, llvm::StringMap<Attribute>& identifier_to_attr) {
return rewriter.getI64IntegerAttr(1);
}
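// Returns the quantization axis attribute propagated from the defining op of
// the operand at `operand_index`, or -1 (per-tensor) when none is found.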
Attribute GetQuantizationAxis(PatternRewriter& rewriter, Operation* op,
const int operand_index) {
auto* defining_op = op->getOperand(operand_index).getDefiningOp();
for (auto attr : kQuantizationAxisAttrs) {
if (defining_op->hasAttr(attr)) {
return defining_op->getAttr(attr);
}
}
return rewriter.getI64IntegerAttr(-1);
}
LogicalResult CheckIfAttrIs8Bit(const std::string& attr, Operation* op,
bool& is_8_bit) {
Type element_type;
if (attr == "lhs_quantization" || attr == "input_quantization" ||
attr == "quantization") {
if (op->getNumOperands() < 1) {
return failure();
}
element_type = getElementTypeOrSelf(op->getOperand(0).getType());
}
if (attr == "rhs_quantization") {
if (op->getNumOperands() < 2) {
return failure();
}
element_type = getElementTypeOrSelf(op->getOperand(1).getType());
}
if (attr == "output_quantization") {
if (op->getNumResults() < 1) {
return failure();
}
element_type = getElementTypeOrSelf(op->getOpResult(0).getType());
}
if (element_type) {
is_8_bit = mlir::isa<TF::Qint8Type>(element_type);
return success();
}
return failure();
}
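// Appends `<attr>_min_val` / `<attr>_max_val` integer bounds for each
// quantized operand/result prefix implied by `op_type`, using the int8 range
// when the corresponding tensor element type is qint8 and the int32 range
// otherwise.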
LogicalResult FillQuantizationAttributes(
PatternRewriter& rewriter, Operation* op, NamedAttrList& attrs,
llvm::StringMap<Attribute>& identifier_to_attr, OpType op_type) {
absl::flat_hash_map<std::string, int> min_max_scheme_for_8bit = {
{"min", -128}, {"max", 127}};
absl::flat_hash_map<std::string, int> min_max_schema_for_32bit = {
{"min", -2147483648}, {"max", 2147483647}};
std::vector<std::string> quantization_attributes;
switch (op_type) {
case OpType::kDynamicRangeOp:
quantization_attributes = {"rhs_quantization"};
break;
case OpType::kUnaryOp:
quantization_attributes = {"quantization"};
break;
case OpType::kBinaryOp:
quantization_attributes = {"lhs_quantization", "rhs_quantization",
"output_quantization"};
break;
case OpType::kQuantizationOp:
quantization_attributes = {"input_quantization", "output_quantization"};
break;
default:
quantization_attributes = {};
break;
}
for (const auto& attr : quantization_attributes) {
bool attr_is_8_bit;
if (failed(CheckIfAttrIs8Bit(attr, op, attr_is_8_bit))) {
return failure();
}
for (int i = 0; i < kSuffixes.size(); i++) {
int64_t quant_val;
if (attr_is_8_bit) {
quant_val = i == 0 ? min_max_scheme_for_8bit["min"]
: min_max_scheme_for_8bit["max"];
} else {
quant_val = i == 0 ? min_max_schema_for_32bit["min"]
: min_max_schema_for_32bit["max"];
}
std::string attr_minmax = absl::StrCat(attr, kSuffixes[i]);
attrs.push_back(rewriter.getNamedAttr(
attr_minmax, rewriter.getI64IntegerAttr(quant_val)));
}
}
return success();
}
LogicalResult FillAttributesForUniformQuantizedDotOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (quantization_method ==
tensorflow::quantization::QuantizationMethod::METHOD_DYNAMIC_RANGE_INT8) {
if (failed(FillQuantizationAttributes(rewriter, op, attrs,
identifier_to_attr,
OpType::kDynamicRangeOp))) {
return failure();
}
} else {
if (failed(FillQuantizationAttributes(
rewriter, op, attrs, identifier_to_attr, OpType::kBinaryOp))) {
return failure();
}
attrs.push_back(rewriter.getNamedAttr("lhs_quantization_axis",
rewriter.getI64IntegerAttr(-1)));
}
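  // Per-channel quantization applies only when the op has a single
  // quantizable operand; otherwise the axis stays -1 (per-tensor).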
std::unique_ptr<OpQuantSpec> spec = GetUniformOpQuantSpec(op);
absl::flat_hash_set<int> operands = spec->quantizable_operands;
int quant_dim = -1;
if (enable_per_channel_quantization && operands.size() == 1) {
quant_dim = spec->coeff_op_quant_dim[*(operands.begin())];
}
attrs.push_back(rewriter.getNamedAttr("rhs_quantization_axis",
rewriter.getI64IntegerAttr(quant_dim)));
attrs.push_back(rewriter.getNamedAttr("output_quantization_axis",
rewriter.getI64IntegerAttr(quant_dim)));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
LogicalResult FillAttributesForUniformQuantizedConvolutionOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
  const absl::flat_hash_map<std::string,
                            Attribute (*)(PatternRewriter&,
                                          llvm::StringMap<Attribute>&)>
      attribute_getter_map = {{"window_strides", GetWindowStridesValue},
                              {"lhs_dilation", GetLhsDilationValue},
                              {"rhs_dilation", GetRhsDilationValue},
                              {"padding", GetPaddingValue},
                              {"explicit_padding", GetExplicitPaddingValue},
                              {"dimension_numbers", GetDimensionNumbersValue},
                              {"batch_group_count", GetBatchGroupCountValue}};
  for (const NamedAttribute& attr : op->getAttrs()) {
    const llvm::StringRef attr_name = attr.getName().getValue();
    const auto it = attribute_getter_map.find(attr_name.str());
    if (it != attribute_getter_map.end()) {
      const Attribute attr_val = (it->second)(rewriter, identifier_to_attr);
      attrs.push_back(rewriter.getNamedAttr(attr_name, attr_val));
    }
  }
auto feature_group_cnt_attr = llvm::StringRef("feature_group_count");
int feature_group_cnt = 1;
ShapedType input_shape =
mlir::dyn_cast<ShapedType>(op->getOperand(0).getType());
if (!input_shape) {
return op->emitError(
"Only input with known shape is supported for Uniform Quantized "
"opset.");
}
if (op->getParentOfType<func::FuncOp>().getName().contains("depthwise_")) {
feature_group_cnt = input_shape.getDimSize(3);
}
attrs.push_back(rewriter.getNamedAttr(
feature_group_cnt_attr, rewriter.getI64IntegerAttr(feature_group_cnt)));
  if (quantization_method ==
      tensorflow::quantization::QuantizationMethod::METHOD_DYNAMIC_RANGE_INT8) {
    if (failed(FillQuantizationAttributes(rewriter, op, attrs,
                                          identifier_to_attr,
                                          OpType::kDynamicRangeOp))) {
      return failure();
    }
  } else {
    if (failed(FillQuantizationAttributes(
            rewriter, op, attrs, identifier_to_attr, OpType::kBinaryOp))) {
      return failure();
    }
    attrs.push_back(rewriter.getNamedAttr("lhs_quantization_axis",
                                          rewriter.getI64IntegerAttr(-1)));
  }
std::unique_ptr<OpQuantSpec> spec = GetUniformOpQuantSpec(op);
absl::flat_hash_set<int> operands = spec->quantizable_operands;
int quant_dim = -1;
if (enable_per_channel_quantization && operands.size() == 1) {
quant_dim = spec->coeff_op_quant_dim[*(operands.begin())];
}
attrs.push_back(rewriter.getNamedAttr("rhs_quantization_axis",
rewriter.getI64IntegerAttr(quant_dim)));
attrs.push_back(rewriter.getNamedAttr("output_quantization_axis",
rewriter.getI64IntegerAttr(quant_dim)));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
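// Fills attributes for a UniformQuantizedAdd op. With per-channel
// quantization enabled, the activation axis is taken from whichever operand
// reports a non-default (!= -1) quantization_axis attribute, and the same
// axis is then applied to lhs, rhs, and output.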
LogicalResult FillAttributesForUniformQuantizedAddOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
const QuantMethod quantization_method,
const bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (failed(FillQuantizationAttributes(rewriter, op, attrs, identifier_to_attr,
OpType::kBinaryOp))) {
return failure();
}
Attribute activation_quantization_axis = rewriter.getI64IntegerAttr(-1);
  if (enable_per_channel_quantization) {
    activation_quantization_axis = GetQuantizationAxis(rewriter, op, 0);
    if (activation_quantization_axis == rewriter.getI64IntegerAttr(-1)) {
      activation_quantization_axis = GetQuantizationAxis(rewriter, op, 1);
    }
  }
attrs.push_back(rewriter.getNamedAttr("lhs_quantization_axis",
activation_quantization_axis));
attrs.push_back(rewriter.getNamedAttr("rhs_quantization_axis",
activation_quantization_axis));
attrs.push_back(rewriter.getNamedAttr("output_quantization_axis",
activation_quantization_axis));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
LogicalResult FillAttributesForUniformQuantizedClipByValueOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (failed(FillQuantizationAttributes(rewriter, op, attrs, identifier_to_attr,
OpType::kUnaryOp))) {
return failure();
}
Attribute activation_quantization_axis = rewriter.getI64IntegerAttr(-1);
  if (enable_per_channel_quantization) {
    activation_quantization_axis = GetQuantizationAxis(rewriter, op, 0);
  }
attrs.push_back(
rewriter.getNamedAttr("quantization_axis", activation_quantization_axis));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
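// Fills attributes for a UniformRequantize op: the input gets a 32-bit
// min/max range and the output an 8-bit one (decided by CheckIfAttrIs8Bit
// above). With per-channel quantization enabled, the input axis comes from
// operand 0; the output axis stays per-tensor (-1) unless the output scale
// (operand 3) is a non-scalar tensor, in which case it inherits the input
// axis.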
LogicalResult FillAttributesForUniformRequantizeOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (failed(FillQuantizationAttributes(rewriter, op, attrs, identifier_to_attr,
OpType::kQuantizationOp))) {
return failure();
}
Attribute activation_quantization_axis = rewriter.getI64IntegerAttr(-1);
Attribute output_quantization_axis = rewriter.getI64IntegerAttr(-1);
if (enable_per_channel_quantization) {
    activation_quantization_axis = GetQuantizationAxis(rewriter, op, 0);
auto output_scale_type =
mlir::dyn_cast<ShapedType>(op->getOperand(3).getType());
if (!output_scale_type) {
return failure();
}
if (output_scale_type.hasRank() && 0 < output_scale_type.getRank()) {
output_quantization_axis = activation_quantization_axis;
}
}
attrs.push_back(rewriter.getNamedAttr("input_quantization_axis",
activation_quantization_axis));
attrs.push_back(rewriter.getNamedAttr("output_quantization_axis",
output_quantization_axis));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
LogicalResult FillAttributesForUniformQuantizeOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (failed(FillQuantizationAttributes(rewriter, op, attrs, identifier_to_attr,
OpType::kUnaryOp))) {
return failure();
}
Attribute quantization_axis = rewriter.getI64IntegerAttr(-1);
if (enable_per_channel_quantization) {
quantization_axis = rewriter.getI64IntegerAttr(3);
}
attrs.push_back(
rewriter.getNamedAttr("quantization_axis", quantization_axis));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.h"
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringMap.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir::quant {
namespace {
using QuantMethod = tensorflow::quantization::QuantizationMethod::PresetMethod;
class EmptyPatternRewriter : public mlir::PatternRewriter {
public:
explicit EmptyPatternRewriter(const OpBuilder& op_builder)
: mlir::PatternRewriter(op_builder) {}
~EmptyPatternRewriter() override = default;
};
class TfToUniformAttributeUtilsTestPeer {
public:
explicit TfToUniformAttributeUtilsTestPeer() = delete;
explicit TfToUniformAttributeUtilsTestPeer(MLIRContext* ctx)
: rewriter_(OpBuilder(ctx)) {}
EmptyPatternRewriter rewriter_;
};
class TfToUniformAttributeUtilsTest : public ::testing::Test {
protected:
TfToUniformAttributeUtilsTest() : ctx_() {
ctx_.loadDialect<TF::TensorFlowDialect>();
}
MLIRContext ctx_;
};
TF::UniformQuantizedAddOp ParseUniformQuantizedAddOp(
const absl::string_view add_op_str, Block& block, MLIRContext& ctx) {
const LogicalResult parse_result =
parseSourceString(add_op_str, &block, ParserConfig(&ctx));
EXPECT_TRUE(succeeded(parse_result));
auto uq_add_op = dyn_cast_or_null<TF::UniformQuantizedAddOp>(block.back());
EXPECT_TRUE(uq_add_op);
return uq_add_op;
}
TF::UniformRequantizeOp ParseUniformRequantizedOp(
const absl::string_view requant_op_str, Block& block, MLIRContext& ctx) {
const LogicalResult parse_result =
parseSourceString(requant_op_str, &block, ParserConfig(&ctx));
EXPECT_TRUE(succeeded(parse_result));
auto uq_requant_op = dyn_cast_or_null<TF::UniformRequantizeOp>(block.back());
EXPECT_TRUE(uq_requant_op);
return uq_requant_op;
}
TEST_F(TfToUniformAttributeUtilsTest, UniformQuantizedAddOpAttributes) {
TfToUniformAttributeUtilsTestPeer test_peer(&ctx_);
constexpr absl::string_view kAddOpExpr =
R"mlir(
%0 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<1x3x2x2x!tf_type.qint32>} : () -> tensor<1x3x2x2x!tf_type.qint32>
%1 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<2x!tf_type.qint32>} : () -> tensor<2x!tf_type.qint32>
%2 = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
%3 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
%4 = "tf.UniformQuantizedAdd"(%0, %1, %2, %3, %2, %3, %2, %3) {device = "", lhs_quantization_axis = -1 : i64, lhs_quantization_max_val = 127 : i64, lhs_quantization_min_val = -127 : i64, output_quantization_axis = -1 : i64, output_quantization_max_val = 127 : i64, output_quantization_min_val = -127 : i64, rhs_quantization_axis = -1 : i64, rhs_quantization_max_val = 127 : i64, rhs_quantization_min_val = -127 : i64} : (tensor<1x3x2x2x!tf_type.qint32>, tensor<2x!tf_type.qint32>, tensor<f32>, tensor<i32>, tensor<f32>, tensor<i32>, tensor<f32>, tensor<i32>) -> tensor<1x3x2x2x!tf_type.qint32>
)mlir";
Block block{};
TF::UniformQuantizedAddOp op =
ParseUniformQuantizedAddOp(kAddOpExpr, block, ctx_);
llvm::StringMap<Attribute> identifier_to_attr;
QuantMethod quantization_method =
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8;
auto res = FillAttributesForUniformQuantizedAddOp(
test_peer.rewriter_, op, identifier_to_attr, quantization_method,
false);
ASSERT_TRUE(succeeded(res));
ASSERT_EQ(2147483647, op.getLhsQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getLhsQuantizationMinValAttr().getInt());
ASSERT_EQ(2147483647, op.getRhsQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getRhsQuantizationMinValAttr().getInt());
ASSERT_EQ(2147483647, op.getOutputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getOutputQuantizationMinValAttr().getInt());
ASSERT_EQ(-1, op.getLhsQuantizationAxisAttr().getInt());
ASSERT_EQ(-1, op.getRhsQuantizationAxisAttr().getInt());
ASSERT_EQ(-1, op.getOutputQuantizationAxisAttr().getInt());
}
TEST_F(TfToUniformAttributeUtilsTest, UniformQuantizedRequantizeOpAttributes) {
TfToUniformAttributeUtilsTestPeer test_peer(&ctx_);
constexpr absl::string_view kRequantOpExpr =
R"mlir(
%0 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<1x3x2x2x!tf_type.qint32>, quantization_axis = 3} : () -> tensor<1x3x2x2x!tf_type.qint32>
%1 = "tf.Const"() {value = dense<1.0> : tensor<2xf32>} : () -> tensor<2xf32>
%2 = "tf.Const"() {value = dense<2> : tensor<2xi32>} : () -> tensor<2xi32>
%3 = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
%4 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
%5 = "tf.UniformRequantize"(%0, %1, %2, %3, %4) {device = "", input_quantization_axis = 3 : i64, input_quantization_max_val = 127 : i64, input_quantization_min_val = -127 : i64, output_quantization_axis = -1 : i64, output_quantization_max_val = 127 : i64, output_quantization_min_val = -127 : i64} : (tensor<1x3x2x2x!tf_type.qint32>, tensor<2xf32>, tensor<2xi32>, tensor<f32>, tensor<i32>) -> tensor<1x3x2x2x!tf_type.qint8>
)mlir";
Block block{};
TF::UniformRequantizeOp op =
ParseUniformRequantizedOp(kRequantOpExpr, block, ctx_);
llvm::StringMap<Attribute> identifier_to_attr;
QuantMethod quantization_method =
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8;
auto res = FillAttributesForUniformRequantizeOp(
test_peer.rewriter_, op, identifier_to_attr, quantization_method,
true);
ASSERT_TRUE(succeeded(res));
ASSERT_EQ(2147483647, op.getInputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getInputQuantizationMinValAttr().getInt());
ASSERT_EQ(127, op.getOutputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-128, op.getOutputQuantizationMinValAttr().getInt());
ASSERT_EQ(3, op.getInputQuantizationAxisAttr().getInt());
ASSERT_EQ(-1, op.getOutputQuantizationAxisAttr().getInt());
}
TEST_F(TfToUniformAttributeUtilsTest,
UniformQuantizedRequantizeOpAttributes_OutputPerChannel) {
TfToUniformAttributeUtilsTestPeer test_peer(&ctx_);
constexpr absl::string_view kRequantOpExpr =
R"mlir(
%0 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<1x3x2x2x!tf_type.qint32>, quantization_axis = 3} : () -> tensor<1x3x2x2x!tf_type.qint32>
%1 = "tf.Const"() {value = dense<1.0> : tensor<2xf32>} : () -> tensor<2xf32>
%2 = "tf.Const"() {value = dense<2> : tensor<2xi32>} : () -> tensor<2xi32>
%3 = "tf.Const"() {value = dense<1.0> : tensor<2xf32>} : () -> tensor<2xf32>
%4 = "tf.Const"() {value = dense<0> : tensor<2xi32>} : () -> tensor<2xi32>
%5 = "tf.UniformRequantize"(%0, %1, %2, %3, %4) {device = "", input_quantization_axis = 3 : i64, input_quantization_max_val = 127 : i64, input_quantization_min_val = -127 : i64, output_quantization_axis = 1 : i64, output_quantization_max_val = 127 : i64, output_quantization_min_val = -127 : i64} : (tensor<1x3x2x2x!tf_type.qint32>, tensor<2xf32>, tensor<2xi32>, tensor<2xf32>, tensor<2xi32>) -> tensor<1x3x2x2x!tf_type.qint8>
)mlir";
Block block{};
TF::UniformRequantizeOp op =
ParseUniformRequantizedOp(kRequantOpExpr, block, ctx_);
llvm::StringMap<Attribute> identifier_to_attr;
QuantMethod quantization_method =
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8;
auto res = FillAttributesForUniformRequantizeOp(
test_peer.rewriter_, op, identifier_to_attr, quantization_method,
true);
ASSERT_TRUE(succeeded(res));
ASSERT_EQ(2147483647, op.getInputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getInputQuantizationMinValAttr().getInt());
ASSERT_EQ(127, op.getOutputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-128, op.getOutputQuantizationMinValAttr().getInt());
ASSERT_EQ(3, op.getInputQuantizationAxisAttr().getInt());
ASSERT_EQ(3, op.getOutputQuantizationAxisAttr().getInt());
}
TEST_F(TfToUniformAttributeUtilsTest,
UniformQuantizedRequantizeOpAttributes_DisablePerChannelQuantization) {
TfToUniformAttributeUtilsTestPeer test_peer(&ctx_);
constexpr absl::string_view kRequantOpExpr =
R"mlir(
%0 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<1x3x2x2x!tf_type.qint32>, quantization_axis = 3} : () -> tensor<1x3x2x2x!tf_type.qint32>
%1 = "tf.Const"() {value = dense<1.0> : tensor<2xf32>} : () -> tensor<2xf32>
%2 = "tf.Const"() {value = dense<2> : tensor<2xi32>} : () -> tensor<2xi32>
%3 = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
%4 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
%5 = "tf.UniformRequantize"(%0, %1, %2, %3, %4) {device = "", input_quantization_axis = 3 : i64, input_quantization_max_val = 127 : i64, input_quantization_min_val = -127 : i64, output_quantization_axis = -1 : i64, output_quantization_max_val = 127 : i64, output_quantization_min_val = -127 : i64} : (tensor<1x3x2x2x!tf_type.qint32>, tensor<2xf32>, tensor<2xi32>, tensor<f32>, tensor<i32>) -> tensor<1x3x2x2x!tf_type.qint8>
)mlir";
Block block{};
TF::UniformRequantizeOp op =
ParseUniformRequantizedOp(kRequantOpExpr, block, ctx_);
llvm::StringMap<Attribute> identifier_to_attr;
QuantMethod quantization_method =
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8;
auto res = FillAttributesForUniformRequantizeOp(
test_peer.rewriter_, op, identifier_to_attr, quantization_method,
false);
ASSERT_TRUE(succeeded(res));
ASSERT_EQ(2147483647, op.getInputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getInputQuantizationMinValAttr().getInt());
ASSERT_EQ(127, op.getOutputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-128, op.getOutputQuantizationMinValAttr().getInt());
ASSERT_EQ(-1, op.getInputQuantizationAxisAttr().getInt());
ASSERT_EQ(-1, op.getOutputQuantizationAxisAttr().getInt());
}
}
} |
1,217 | cpp | tensorflow/tensorflow | tf_to_xla_attribute_utils | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_UTILS_TF_TO_XLA_ATTRIBUTE_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_UTILS_TF_TO_XLA_ATTRIBUTE_UTILS_H_
#include "mlir/IR/Builders.h"
namespace mlir::quant {
Value CalculatePaddingAndPadIfNeeded(OpBuilder &builder, Location loc,
Value input, Value filter,
int8_t input_zp_value, ArrayAttr strides,
ArrayAttr dilations,
StringAttr conv_padding,
ArrayAttr explicit_paddings,
Value &padding, int num_dims = 4);
Value PackOperand(OpBuilder &builder, Location loc, Value value, int pack_dim);
}
#endif
#include <algorithm>
#include <numeric>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/strings/str_format.h"
#include "llvm/ADT/ArrayRef.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h"
#include "tensorflow/compiler/mlir/lite/kernels/padding.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold.h"
#include "xla/xla_data.pb.h"
namespace mlir::quant {
namespace {
Value GetDimValue(OpBuilder &builder, Location loc, Value shape_value,
int32_t dim) {
Type attribute_type = builder.getI64Type();
return builder.create<TF::StridedSliceOp>(
loc,
RankedTensorType::get(
{}, mlir::cast<ShapedType>(shape_value.getType()).getElementType()),
shape_value,
Create1DConstValue<int32_t>(builder, loc, {dim}),
Create1DConstValue<int32_t>(builder, loc, {dim + 1}),
Create1DConstValue<int32_t>(builder, loc, {1}),
builder.getIntegerAttr(attribute_type, 0),
builder.getIntegerAttr(attribute_type, 0),
builder.getIntegerAttr(attribute_type, 0),
builder.getIntegerAttr(attribute_type, 0),
builder.getIntegerAttr(attribute_type, 1));
}
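// Emits scalar TF ops that compute SAME padding for one spatial dimension:
//   effective_filter_size = (filter_sz - 1) * dilation_rate + 1
//   output_size = ceil(input_size / stride)
//               = (input_size + stride - 1) / stride
//   padding_needed = max(0, (output_size - 1) * stride
//                           + effective_filter_size - input_size)
//   padding_low = padding_needed / 2
//   padding_high = padding_needed - padding_low
// The values are built as ops rather than constants because input_size may
// be dynamic.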
void GetSamePaddingValues(OpBuilder &builder, Location loc, Value input_size,
int64_t filter_sz, int64_t dilation_rate,
int64_t stride, Value &padding_low,
Value &padding_high) {
Value zero = CreateScalarConstValue<int32_t>(builder, loc, 0);
Value one = CreateScalarConstValue<int32_t>(builder, loc, 1);
Value two = CreateScalarConstValue<int32_t>(builder, loc, 2);
Value filter_size = CreateScalarConstValue<int32_t>(builder, loc, filter_sz);
Type int32_scalar_type = zero.getType();
auto scalar_add = [&](Value lhs, Value rhs) {
return builder.create<TF::AddOp>(loc, int32_scalar_type, lhs, rhs);
};
auto scalar_mul = [&](Value lhs, Value rhs) {
return builder.create<TF::MulOp>(loc, int32_scalar_type, lhs, rhs);
};
auto scalar_sub = [&](Value lhs, Value rhs) {
return builder.create<TF::SubOp>(loc, int32_scalar_type, lhs, rhs);
};
auto scalar_div = [&](Value lhs, Value rhs) {
return builder.create<TF::DivOp>(loc, int32_scalar_type, lhs, rhs);
};
Value stride_value = CreateScalarConstValue<int32_t>(builder, loc, stride);
Value dilation_rate_value =
CreateScalarConstValue<int32_t>(builder, loc, dilation_rate);
Value effective_filter_size_op = scalar_add(
scalar_mul(dilation_rate_value, scalar_sub(filter_size, one)), one);
Value output_size = scalar_div(
scalar_add(input_size, scalar_sub(stride_value, one)), stride_value);
Value padding_needed = scalar_sub(
scalar_add(effective_filter_size_op,
scalar_mul(stride_value, scalar_sub(output_size, one))),
input_size);
padding_needed = builder.create<TF::MaximumOp>(loc, padding_needed, zero);
padding_low = scalar_div(padding_needed, two);
padding_high = scalar_sub(padding_needed, padding_low);
}
Value PadForDynamicShapedInputSamePadding(
OpBuilder &builder, Location loc, Value input, Value filter,
int8_t input_zp_value, ArrayAttr strides, ArrayAttr dilations,
StringAttr conv_padding, Value &padding, int num_dims) {
Value zero_rank1 = CreateConstValue<int32_t>(builder, loc, {1}, {0});
SmallVector<Value> temp_padding_values{zero_rank1, zero_rank1};
auto reshape_op = [&](Value value, const SmallVector<int64_t> &shape) {
const int64_t rank = shape.size();
return builder.create<TF::ReshapeOp>(
loc, RankedTensorType::get(shape, builder.getI32Type()), value,
CreateConstValue<int64_t>(builder, loc, {rank}, shape));
};
ShapedType filter_shape = mlir::cast<ShapedType>(filter.getType());
Value input_shape_value = builder.create<TF::ShapeOp>(
loc, RankedTensorType::get({num_dims}, builder.getI32Type()), input);
auto scalar_to_rank1 = [&](Value value) { return reshape_op(value, {1}); };
for (int i : llvm::seq<int>(1, num_dims - 1)) {
Value input_size_i = GetDimValue(builder, loc, input_shape_value, i);
const int stride_i = mlir::cast<IntegerAttr>(strides[i]).getInt();
const int dilation_i = mlir::cast<IntegerAttr>(dilations[i]).getInt();
const int filter_i = filter_shape.getDimSize(i - 1);
Value pad_i_low, pad_i_high;
GetSamePaddingValues(builder, loc, input_size_i, filter_i, dilation_i,
stride_i, pad_i_low, pad_i_high);
temp_padding_values.push_back(scalar_to_rank1(pad_i_low));
temp_padding_values.push_back(scalar_to_rank1(pad_i_high));
}
temp_padding_values.push_back(zero_rank1);
temp_padding_values.push_back(zero_rank1);
padding = CreateConstValue<int32_t>(
builder, loc, {num_dims - 2, 2},
SmallVector<int32_t>(2 * (num_dims - 2), 0));
Value zero = CreateScalarConstValue(builder, loc, 0);
Value temp_padding_rank1 = builder.create<TF::ConcatOp>(
loc, RankedTensorType::get({2 * num_dims}, builder.getI32Type()), zero,
temp_padding_values);
Value temp_padding = reshape_op(temp_padding_rank1, {num_dims, 2});
return builder.create<TF::PadV2Op>(
loc, input.getType(), input, temp_padding,
CreateScalarConstValue<int8_t>(builder, loc, input_zp_value));
}
}
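// Computes convolution padding and, when the input zero point is nonzero,
// materializes it with a PadV2 op (filled with the zero point) so the
// downstream XLA op can run with zero padding. SAME padding over a dynamic
// spatial dimension falls back to the op-building path above; EXPLICIT
// padding requires explicit_paddings to hold 2 * num_dims elements. Returns
// the (possibly padded) input and sets `padding` to a {num_dims - 2, 2}
// constant: the computed spatial padding when no pad op is inserted, or all
// zeros when the input has already been padded.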
Value CalculatePaddingAndPadIfNeeded(OpBuilder &builder, Location loc,
Value input, Value filter,
int8_t input_zp_value, ArrayAttr strides,
ArrayAttr dilations,
StringAttr conv_padding,
ArrayAttr explicit_paddings,
Value &padding, int num_dims) {
ShapedType input_shape = mlir::cast<ShapedType>(input.getType());
SmallVector<int64_t> spatial_dims(num_dims - 2);
absl::c_iota(spatial_dims, 1);
bool has_dynamic_spatial_dim = absl::c_any_of(
spatial_dims,
[&input_shape](int64_t dim) { return input_shape.isDynamicDim(dim); });
if (conv_padding.strref() == "SAME" && has_dynamic_spatial_dim) {
return PadForDynamicShapedInputSamePadding(
builder, loc, input, filter, input_zp_value, strides, dilations,
conv_padding, padding, num_dims);
}
ShapedType filter_shape = mlir::cast<ShapedType>(filter.getType());
SmallVector<int32_t> padding_values(2 * num_dims, 0);
if (conv_padding.strref() == "EXPLICIT") {
if (explicit_paddings.size() != 2 * num_dims) {
emitError(loc,
absl::StrFormat(
"explicit_paddings are expected to be %d-element arrays",
2 * num_dims));
return {};
}
for (int i : spatial_dims) {
padding_values[2 * i] =
mlir::cast<IntegerAttr>(explicit_paddings[2 * i]).getInt();
padding_values[2 * i + 1] =
mlir::cast<IntegerAttr>(explicit_paddings[2 * i + 1]).getInt();
}
} else if (conv_padding.strref() == "SAME") {
for (int i : spatial_dims) {
int input_size = input_shape.getDimSize(i);
int filter_size = filter_shape.getDimSize(i - 1);
int stride_i = mlir::cast<IntegerAttr>(strides[i]).getInt();
int dilation_i = mlir::cast<IntegerAttr>(dilations[i]).getInt();
int out_size = tflite_migration::ComputeOutSize(
kTfLitePaddingSame, input_size, filter_size, stride_i, dilation_i);
int offset = 0;
int padding_before = tflite_migration::ComputePaddingWithOffset(
stride_i, dilation_i, input_size, filter_size, out_size, &offset);
int padding_after = padding_before + offset;
padding_values[2 * i] = padding_before;
padding_values[2 * i + 1] = padding_after;
}
}
if (input_zp_value == 0 ||
absl::c_all_of(padding_values, [](int v) { return v == 0; })) {
padding = CreateConstValue<int32_t>(
builder, loc, {num_dims - 2, 2},
SmallVector<int32_t>(padding_values.begin() + 2,
padding_values.end() - 2));
return input;
}
padding =
CreateConstValue<int32_t>(builder, loc, {num_dims - 2, 2},
SmallVector<int32_t>(2 * (num_dims - 2), 0));
Value temp_padding =
CreateConstValue<int32_t>(builder, loc, {num_dims, 2}, padding_values);
SmallVector<int64_t> output_shape(input_shape.getShape().begin(),
input_shape.getShape().end());
for (int i : spatial_dims) {
output_shape[i] += padding_values[2 * i] + padding_values[2 * i + 1];
}
return builder.create<TF::PadV2Op>(
loc, RankedTensorType::get(output_shape, builder.getI8Type()), input,
temp_padding,
CreateScalarConstValue<int8_t>(builder, loc, input_zp_value));
}
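// Packs an int8 tensor holding int4 values into half as many bytes along
// `pack_dim`: the dimension is padded to an even size if necessary, split in
// half, and the first half supplies the low nibbles (masked with 0x0F) while
// the second half, shifted left by 4, supplies the high nibbles. E.g., per
// the unit test below, a {2, 2} tensor {0x01, 0x02, 0x03, 0x04} packed along
// dim 0 becomes the {1, 2} tensor {0x31, 0x42}. The result is
// constant-folded when possible.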
Value PackOperand(OpBuilder &builder, Location loc, Value value, int pack_dim) {
ShapedType value_type = mlir::cast<ShapedType>(value.getType());
const int rank = value_type.getRank();
SmallVector<int64_t> packed_shape(value_type.getShape().begin(),
value_type.getShape().end());
RankedTensorType shape_type =
RankedTensorType::get({rank}, builder.getI64Type());
Value shape_value = builder.create<TF::ShapeOp>(loc, shape_type, value);
if (packed_shape[pack_dim] % 2 != 0) {
packed_shape[pack_dim] += 1;
SmallVector<int32_t> padding(rank * 2, 0);
padding[pack_dim * 2 + 1] = 1;
Value padding_value =
CreateConstValue<int32_t>(builder, loc, {rank, 2}, padding);
value = builder.create<TF::PadV2Op>(
loc, RankedTensorType::get(packed_shape, builder.getI8Type()), value,
padding_value, CreateScalarConstValue<int8_t>(builder, loc, 0));
SmallVector<int64_t> shape_add(rank, 0);
shape_add[pack_dim] = 1;
shape_value = builder.create<TF::AddOp>(
loc, shape_type, shape_value,
CreateConstValue<int64_t>(builder, loc, {rank}, shape_add));
}
packed_shape[pack_dim] /= 2;
SmallVector<int64_t> divisor(rank, 1);
divisor[pack_dim] = 2;
RankedTensorType packed_output_type =
RankedTensorType::get(packed_shape, builder.getI8Type());
Value packed_shape_value = builder.create<TF::DivOp>(
loc, shape_type, shape_value,
CreateConstValue<int64_t>(builder, loc, {rank}, divisor));
Value packed_low_begin_value = CreateConstValue<int64_t>(
builder, loc, {rank}, SmallVector<int64_t>(rank, 0));
Value packed_low_value =
builder.create<TF::SliceOp>(loc, packed_output_type, value,
packed_low_begin_value, packed_shape_value);
packed_low_value = builder.create<TF::BitwiseAndOp>(
loc, packed_output_type, packed_low_value,
CreateScalarConstValue<int8_t>(builder, loc, 0x0F));
SmallVector<int64_t> packed_high_begin(rank, 0);
packed_high_begin[pack_dim] = packed_shape[pack_dim];
Value packed_high_begin_value =
CreateConstValue<int64_t>(builder, loc, {rank}, packed_high_begin);
Value packed_high_value =
builder.create<TF::SliceOp>(loc, packed_output_type, value,
packed_high_begin_value, packed_shape_value);
packed_high_value = builder.create<TF::LeftShiftOp>(
loc, packed_output_type, packed_high_value,
CreateScalarConstValue<int8_t>(builder, loc, 4));
Operation *packed = builder.create<TF::BitwiseOrOp>(
loc, packed_output_type, packed_low_value, packed_high_value);
return ConstantFoldOpIfPossible(packed).front();
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.h"
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
namespace mlir::quant {
namespace {
void PackOperandTestHelper(
const llvm::SmallVector<int64_t>& unpacked_shape,
const llvm::SmallVector<int8_t>& unpacked_values, int pack_dim,
const llvm::SmallVector<int64_t>& expected_packed_shape,
const llvm::SmallVector<int8_t>& expected_packed_values) {
MLIRContext context;
OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context)));
OpBuilder builder(&module->getBodyRegion());
context.loadDialect<TF::TensorFlowDialect>();
Value value = CreateConstValue<int8_t>(builder, module->getLoc(),
unpacked_shape, unpacked_values);
Value packed_value = PackOperand(builder, module->getLoc(), value, pack_dim);
DenseIntElementsAttr packed_value_attr;
ASSERT_TRUE(matchPattern(packed_value, m_Constant(&packed_value_attr)));
ShapedType packed_shape_type =
mlir::dyn_cast<ShapedType>(packed_value.getType());
llvm::SmallVector<int64_t> packed_shape(packed_shape_type.getShape().begin(),
packed_shape_type.getShape().end());
EXPECT_THAT(packed_shape, testing::ElementsAreArray(expected_packed_shape));
llvm::SmallVector<int8_t> packed_value_vector(
packed_value_attr.getValues<int8_t>());
EXPECT_THAT(packed_value_vector,
testing::ElementsAreArray(expected_packed_values));
}
TEST(TfToXlaAttributeUtilsTest, PackOperandPackDimSizeEven) {
PackOperandTestHelper({2, 2},
{0x01, 0x02, 0x03, 0x04},
0,
{1, 2},
{0x31, 0x42});
}
TEST(TfToXlaAttributeUtilsTest, PackOperandPackDimSizeOdd) {
PackOperandTestHelper(
{2, 3},
{0x01, 0x02, 0x03, 0x04, 0x05, 0x06},
1,
{2, 2},
{0x31, 0x02, 0x64, 0x05});
}
}
} |
1,218 | cpp | tensorflow/tensorflow | mlir_dump | tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump.cc | tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_DEBUGGING_MLIR_DUMP_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_DEBUGGING_MLIR_DUMP_H_
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "mlir/Pass/PassManager.h"
namespace tensorflow {
namespace quantization {
void EnableIrPrinting(mlir::PassManager &pm,
absl::string_view file_name_prefix);
absl::Status MaybeEnableIrPrinting(mlir::PassManager &pm,
absl::string_view file_name_prefix);
}
}
#endif
#include "tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump.h"
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/stringpiece.h"
namespace tensorflow {
namespace quantization {
namespace {
absl::StatusOr<std::string> GetMlirDumpDir() {
auto dump_dir = std::string(
absl::NullSafeStringView(std::getenv("TF_QUANT_MLIR_DUMP_PREFIX")));
if (dump_dir.empty()) {
return absl::FailedPreconditionError(
"Environment variable not set: TF_QUANT_MLIR_DUMP_PREFIX, "
"IR dump file for TF quantization is not created.");
}
if (absl::EqualsIgnoreCase(dump_dir, "sponge")) {
if (!tsl::io::GetTestUndeclaredOutputsDir(&dump_dir)) {
return absl::FailedPreconditionError(
"Environment variable TF_QUANT_MLIR_DUMP_PREFIX=sponge but "
"TEST_UNDECLARED_OUTPUT_DIRS not set.");
}
}
return dump_dir;
}
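// An llvm::raw_ostream backed by a tsl::WritableFile so MLIR's printing
// utilities can write through TSL's filesystem layer. If an Append fails,
// the file handle is dropped and later writes become no-ops.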
class WritableFileWrapper : public llvm::raw_ostream {
public:
~WritableFileWrapper() override { flush(); }
static absl::StatusOr<std::unique_ptr<WritableFileWrapper>> Create(
const std::string& filepath) {
std::unique_ptr<tsl::WritableFile> file;
TF_RETURN_IF_ERROR(tsl::Env::Default()->NewWritableFile(filepath, &file));
return absl::WrapUnique(new WritableFileWrapper(std::move(file)));
}
private:
explicit WritableFileWrapper(std::unique_ptr<tsl::WritableFile> file)
: file_(std::move(file)) {
SetBuffered();
}
uint64_t current_pos() const override {
int64_t position;
if (file_->Tell(&position).ok()) {
return position;
} else {
return -1;
}
}
void write_impl(const char* ptr, size_t size) override {
if (file_ && !file_->Append(tsl::StringPiece(ptr, size)).ok()) {
file_ = nullptr;
}
}
std::unique_ptr<tsl::WritableFile> file_;
};
absl::StatusOr<std::unique_ptr<llvm::raw_ostream>> CreateMlirDumpFile(
const absl::string_view dump_file_name) {
const absl::StatusOr<std::string> dump_dir = GetMlirDumpDir();
if (!dump_dir.ok()) {
return dump_dir.status();
}
auto* env = tsl::Env::Default();
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(*dump_dir));
const std::string dump_file_path =
tsl::io::JoinPath(*dump_dir, dump_file_name);
TF_ASSIGN_OR_RETURN(std::unique_ptr<llvm::raw_ostream> file,
WritableFileWrapper::Create(dump_file_path));
LOG(INFO) << "IR dump file created: " << dump_file_path;
return file;
}
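// IRPrinterConfig that dumps the IR before/after each pass into its own file
// under the directory from TF_QUANT_MLIR_DUMP_PREFIX, named
//   <prefix>_<4-digit pass number>_<pass name>_<before|after>.mlir
// Pass numbers are assigned lazily, in the order passes first print,
// starting from 1.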
class PrinterConfig : public mlir::PassManager::IRPrinterConfig {
public:
explicit PrinterConfig(
absl::string_view dump_file_prefix, bool print_module_scope = false,
bool print_after_only_on_change = true,
mlir::OpPrintingFlags op_printing_flags = mlir::OpPrintingFlags())
: mlir::PassManager::IRPrinterConfig(
print_module_scope, print_after_only_on_change,
false, op_printing_flags),
mlir_pass_count_(1),
dump_file_prefix_(dump_file_prefix) {}
void printBeforeIfEnabled(mlir::Pass* pass, mlir::Operation* op,
PrintCallbackFn print_callback) override {
Dump(pass, print_callback, true);
}
void printAfterIfEnabled(mlir::Pass* pass, mlir::Operation* op,
PrintCallbackFn print_callback) override {
Dump(pass, print_callback, false);
}
private:
int64_t mlir_pass_count_;
absl::string_view dump_file_prefix_;
llvm::DenseMap<mlir::Pass*, std::unique_ptr<llvm::raw_ostream>>
pass_to_dump_file_before_map_;
llvm::DenseMap<mlir::Pass*, std::unique_ptr<llvm::raw_ostream>>
pass_to_dump_file_after_map_;
llvm::DenseMap<mlir::Pass*, int64_t> pass_to_number_map_;
int64_t GetPassNumber(mlir::Pass* pass) {
if (!pass_to_number_map_.contains(pass)) {
pass_to_number_map_[pass] = mlir_pass_count_++;
}
return pass_to_number_map_[pass];
}
void Dump(mlir::Pass* pass, PrintCallbackFn print_callback, bool is_before) {
auto& pass_to_dump_file_map = is_before ? pass_to_dump_file_before_map_
: pass_to_dump_file_after_map_;
if (!pass_to_dump_file_map.contains(pass)) {
std::string filename = llvm::formatv(
"{0}_{1,0+4}_{2}_{3}.mlir", dump_file_prefix_, GetPassNumber(pass),
pass->getName().str(), is_before ? "before" : "after");
absl::StatusOr<std::unique_ptr<llvm::raw_ostream>> dump_file =
CreateMlirDumpFile(filename);
if (!dump_file.ok()) {
LOG(WARNING) << "Failed to dump MLIR module to " << filename;
return;
}
pass_to_dump_file_map[pass] = std::move(*dump_file);
}
return print_callback(*(pass_to_dump_file_map[pass]));
}
};
}
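// Enables per-pass IR dumping on `pm` using the PrinterConfig above. MLIR's
// IR printing requires a single-threaded context, so multithreading is
// disabled first if it is enabled.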
void EnableIrPrinting(mlir::PassManager& pm,
absl::string_view file_name_prefix) {
mlir::OpPrintingFlags flag{};
flag.useLocalScope().elideLargeElementsAttrs().enableDebugInfo();
if (pm.getContext()->isMultithreadingEnabled()) {
pm.getContext()->disableMultithreading();
}
pm.enableIRPrinting(std::make_unique<PrinterConfig>(
file_name_prefix, false,
true, flag));
}
absl::Status MaybeEnableIrPrinting(mlir::PassManager& pm,
absl::string_view file_name_prefix) {
if (!VLOG_IS_ON(1)) {
LOG(INFO) << "Verbosity level too low to enable IR printing.";
return absl::OkStatus();
}
EnableIrPrinting(pm, file_name_prefix);
LOG(INFO) << "IR dump for TensorFlow quantization pipeline enabled.";
return absl::OkStatus();
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "mlir/Transforms/Passes.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace quantization {
namespace mlir_dump_test {
class NoOpPass
: public mlir::PassWrapper<NoOpPass, mlir::OperationPass<mlir::ModuleOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(NoOpPass)
NoOpPass() = default;
llvm::StringRef getArgument() const final { return "no-op-pass"; }
void runOnOperation() override {
}
};
std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> CreateNoOpPass() {
return std::make_unique<NoOpPass>();
}
class ParentPass
: public mlir::PassWrapper<ParentPass,
mlir::OperationPass<mlir::ModuleOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ParentPass)
ParentPass() = default;
llvm::StringRef getArgument() const final { return "parent-pass"; }
void runOnOperation() override {
mlir::MLIRContext* ctx = &getContext();
mlir::ModuleOp module_op = getOperation();
mlir::PassManager pm(ctx);
pm.addPass(CreateNoOpPass());
EnableIrPrinting(pm, "dump2");
if (failed(pm.run(module_op))) {
signalPassFailure();
}
}
};
std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> CreateParentPass() {
return std::make_unique<ParentPass>();
}
}
namespace {
using namespace tensorflow::quantization::mlir_dump_test;
class EnableIrPrintingTest : public ::testing::Test {
protected:
EnableIrPrintingTest() : env_(tsl::Env::Default()) {
if (!tsl::io::GetTestUndeclaredOutputsDir(&test_dir_)) {
test_dir_ = tsl::testing::TmpDir();
}
}
void SetUp() override {
tsl::setenv("TF_QUANT_MLIR_DUMP_PREFIX", test_dir_.c_str(), 1);
mlir::DialectRegistry dialects;
dialects.insert<mlir::BuiltinDialect, mlir::func::FuncDialect,
mlir::stablehlo::StablehloDialect>();
ctx_ = std::make_unique<mlir::MLIRContext>(dialects);
ctx_->loadAllAvailableDialects();
}
void TearDown() override {
std::vector<std::string> files;
TF_ASSERT_OK(
env_->GetMatchingPaths(tsl::io::JoinPath(test_dir_, "*"), &files));
for (const std::string& file : files) {
TF_ASSERT_OK(env_->DeleteFile(file));
}
}
tsl::Env* env_;
std::string test_dir_;
std::unique_ptr<mlir::MLIRContext> ctx_;
};
TEST_F(EnableIrPrintingTest, PassSuccessfullyRuns) {
mlir::PassManager pm = {ctx_.get()};
pm.addPass(CreateNoOpPass());
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
EnableIrPrinting(pm, "dump");
constexpr absl::string_view program = R"mlir(
module{
func.func @main(%arg0: tensor<10xf32>) -> tensor<10xf32> {
return %arg0 : tensor<10xf32>
}
func.func @func1(%arg0: tensor<10xf32>, %arg1: tensor<10xf32>) -> tensor<10xf32> {
%0 = stablehlo.add %arg0, %arg1 : tensor<10xf32>
%1 = stablehlo.add %arg0, %arg1 : tensor<10xf32>
return %0 : tensor<10xf32>
}
})mlir";
auto module_op = mlir::parseSourceString<mlir::ModuleOp>(program, ctx_.get());
const mlir::LogicalResult result = pm.run(module_op.get());
EXPECT_FALSE(failed(result));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_,
"dump_0001_tensorflow::quantization::mlir_dump_test"
"::NoOpPass_before.mlir")));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_, "dump_0002_Canonicalizer_before.mlir")));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_, "dump_0002_Canonicalizer_after.mlir")));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_, "dump_0003_Canonicalizer_before.mlir")));
}
TEST_F(EnableIrPrintingTest, NestedPassSuccessfullyRuns) {
mlir::MLIRContext ctx{};
mlir::PassManager pm = {&ctx};
pm.addPass(CreateParentPass());
EnableIrPrinting(pm, "dump");
mlir::OpBuilder builder(&ctx);
auto module_op = builder.create<mlir::ModuleOp>(builder.getUnknownLoc());
const absl::Cleanup module_op_cleanup = [module_op] { module_op->destroy(); };
const mlir::LogicalResult result = pm.run(module_op);
EXPECT_FALSE(failed(result));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_,
"dump_0001_tensorflow::quantization::mlir_dump_test"
"::ParentPass_before.mlir")));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_,
"dump2_0001_tensorflow::quantization::mlir_dump_test"
"::NoOpPass_before.mlir")));
}
}
}
} |
1,219 | cpp | tensorflow/tensorflow | tf_quantize_op | tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc | tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_OPS_TF_QUANTIZE_OP_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_OPS_TF_QUANTIZE_OP_H_
#include <optional>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Traits.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace quant {
std::optional<TF::PartitionedCallOp> ApplyUniformQuantization(
PatternRewriter& rewriter, TF::ConstOp op,
tensorflow::quantization::QuantizationComponentSpec& weight_spec);
}
}
#endif
#include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h"
#include <functional>
#include <optional>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_quantize_op_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace quant {
namespace {
constexpr StringRef kDequantizeFunctionName = "composite_dequantize";
constexpr StringRef kUniformQuantizationFunctionName = "uniform";
func::FuncOp PrepareFunctionRegister(PatternRewriter& rewriter, Value input_val,
ShapedType result_type,
StringRef func_name,
Value& func_input_arg) {
Operation* input_op = input_val.getDefiningOp();
Operation* insertion_point = input_op->getParentOfType<func::FuncOp>();
if (!insertion_point) insertion_point = input_op->getParentOfType<ModuleOp>();
rewriter.setInsertionPointAfter(insertion_point);
UnrankedTensorType create_unknown_input_shape =
CreateUnknownShapeFromElementType(input_val.getType());
UnrankedTensorType create_unknown_output_shape =
CreateUnknownShapeFromElementType(result_type);
FunctionType func_type =
FunctionType::get(rewriter.getContext(), {create_unknown_input_shape},
{create_unknown_output_shape});
func::FuncOp quantization_func =
rewriter.create<func::FuncOp>(input_op->getLoc(), func_name, func_type);
  OpBuilder::InsertionGuard guard(rewriter);
ArrayRef<Type> inputs = quantization_func.getFunctionType().getInputs();
Block* block = rewriter.createBlock(
&quantization_func.getBody(), quantization_func.begin(), inputs,
SmallVector<Location>(inputs.size(), quantization_func.getLoc()));
func_input_arg = block->getArgument(0);
return quantization_func;
}
TF::PartitionedCallOp FinalizeFunctionRegister(
PatternRewriter& rewriter, Value input, Value output,
func::FuncOp& quantization_func, Operation* quantized_op,
StringRef func_name, IRRewriter::InsertPoint original_point,
Type quantize_result_type) {
rewriter.create<func::ReturnOp>(input.getLoc(), ArrayRef<Value>({output}));
quantization_func.setVisibility(func::FuncOp::Visibility::Private);
SymbolTable symbol_table(quantized_op->getParentOfType<ModuleOp>());
symbol_table.insert(quantization_func);
FlatSymbolRefAttr func_name_attr =
FlatSymbolRefAttr::get(rewriter.getStringAttr(func_name));
rewriter.restoreInsertionPoint(original_point);
auto quantize_call = rewriter.create<TF::PartitionedCallOp>(
quantized_op->getLoc(), quantize_result_type, input, func_name_attr,
"", "", "");
return quantize_call;
}
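// Outlines the ops produced by `quantization_operations_func` into a new
// private function and returns a PartitionedCallOp that invokes it. The
// function name is made unique by appending "_" until it no longer collides
// with an existing symbol in the module.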
std::optional<TF::PartitionedCallOp> RegisterOperationsInFuncOp(
StringRef func_name, PatternRewriter& rewriter, QuantizedType quant_type,
Value input_val, ShapedType result_type,
std::function<Operation*(PatternRewriter&, Operation*, Value, ShapedType,
QuantizedType)>
quantization_operations_func) {
Operation* input_op = input_val.getDefiningOp();
auto original_point = rewriter.saveInsertionPoint();
auto unique_func_name = func_name.str();
SymbolTable symbol_table(input_op->getParentOfType<ModuleOp>());
while (symbol_table.lookup(unique_func_name)) {
absl::StrAppend(&unique_func_name, "_");
}
Value func_input_arg;
func::FuncOp func_op = PrepareFunctionRegister(
rewriter, input_val, result_type, unique_func_name, func_input_arg);
Operation* last_op_in_func =
quantization_operations_func(rewriter, func_op.getOperation(),
func_input_arg, result_type, quant_type);
auto end_call_op = FinalizeFunctionRegister(
rewriter, input_val, last_op_in_func->getResult(0), func_op, input_op,
unique_func_name, original_point, result_type);
return end_call_op;
}
QuantizedType CalculateUniformQuantParams(
PatternRewriter& rewriter, TF::ConstOp op,
tensorflow::quantization::QuantizationComponentSpec& weight_spec) {
const bool kIsNarrowRange = true;
const bool kIsSigned = true;
const int kBitWidth = 8;
DenseFPElementsAttr attr;
if (!matchPattern(op->getResult(0), m_Constant(&attr))) return nullptr;
QuantizedType quant_type = mlir::dyn_cast<quant::QuantizedType>(
quant::GetUniformQuantizedTypeForWeight(
attr, kIsNarrowRange && kIsSigned, kBitWidth, kIsSigned,
kIsNarrowRange, false));
return quant_type;
}
std::optional<Value> AddUniformQuantizeOps(PatternRewriter& rewriter,
TF::ConstOp op,
QuantizedType quant_type) {
DenseFPElementsAttr attr;
if (!matchPattern(op->getResult(0), m_Constant(&attr))) {
    return std::nullopt;
}
Type expressed_type = op.getResult().getType();
Type quantized_type = quant_type.castFromExpressedType(expressed_type);
ShapedType shaped_quantized_type = mlir::cast<ShapedType>(quantized_type);
DenseElementsAttr tensor_proto_attr =
mlir::dyn_cast<DenseElementsAttr>(Quantize(attr, shaped_quantized_type));
if (!tensor_proto_attr) {
    return std::nullopt;
}
Type storage_type =
mlir::cast<QuantizedType>(shaped_quantized_type.getElementType())
.getStorageType();
ShapedType new_type = shaped_quantized_type.clone(storage_type);
rewriter.setInsertionPointAfter(op);
auto const_op =
rewriter.create<TF::ConstOp>(op.getLoc(), new_type, tensor_proto_attr);
auto new_identity_op = rewriter.create<TF::IdentityOp>(
op->getLoc(), const_op.getType(), const_op);
return new_identity_op.getResult();
}
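// Builds the dequantization body: cast the stored integer values back to the
// expressed type, then multiply by the quantized type's scale. No zero-point
// subtraction is needed because CalculateUniformQuantParams creates a
// symmetric (signed, narrow-range) type whose zero point is 0.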
Operation* LogicsForUniformDequantization(PatternRewriter& rewriter,
                                          Operation* func_op, Value input_val,
                                          ShapedType original_input_tensor_type,
                                          QuantizedType quant_type) {
auto loc = input_val.getLoc();
rewriter.setInsertionPointToStart(
&(cast<func::FuncOp>(func_op)).getBody().front());
UnrankedTensorType create_unknown_input_shape =
CreateUnknownShapeFromElementType(original_input_tensor_type);
auto new_cast_op =
rewriter.create<TF::CastOp>(loc, create_unknown_input_shape, input_val);
auto qtype = mlir::dyn_cast<UniformQuantizedType>(quant_type);
TensorType scale_type = RankedTensorType::get({}, rewriter.getF32Type());
Value scale_op = rewriter.create<TF::ConstOp>(
loc, scale_type,
DenseFPElementsAttr::get(scale_type,
{static_cast<float>(qtype.getScale())}));
if (original_input_tensor_type.getElementType().isBF16()) {
scale_op = rewriter.create<TF::CastOp>(
loc, UnrankedTensorType::get(rewriter.getBF16Type()), scale_op);
}
auto mul_op = rewriter.create<TF::MulOp>(loc, new_cast_op.getType(), scale_op,
new_cast_op);
return mul_op;
}
std::optional<TF::PartitionedCallOp> AddUniformDequantizeOps(
PatternRewriter& rewriter, QuantizedType quant_type,
Value val_to_dequantize, ShapedType result_type) {
auto func_name = absl::StrJoin(
{kDequantizeFunctionName, kUniformQuantizationFunctionName}, "_");
std::optional<TF::PartitionedCallOp> dequant_op = RegisterOperationsInFuncOp(
func_name, rewriter, quant_type, val_to_dequantize, result_type,
      LogicsForUniformDequantization);
return dequant_op;
}
}
std::optional<TF::PartitionedCallOp> ApplyUniformQuantization(
PatternRewriter& rewriter, TF::ConstOp op,
tensorflow::quantization::QuantizationComponentSpec& weight_spec) {
QuantizedType quant_type =
CalculateUniformQuantParams(rewriter, op, weight_spec);
  if (!quant_type) return std::nullopt;
std::optional<Value> quantized_val =
AddUniformQuantizeOps(rewriter, op, quant_type);
if (!quantized_val.has_value()) return std::nullopt;
std::optional<TF::PartitionedCallOp> dequantized_val =
AddUniformDequantizeOps(rewriter, quant_type, quantized_val.value(),
mlir::cast<ShapedType>(op.getType()));
return dequantized_val;
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h"
#include <optional>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/QuantOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir::quant {
namespace {
using QuantizationComponentSpec =
tensorflow::quantization::QuantizationComponentSpec;
class EmptyPatternRewriter : public mlir::PatternRewriter {
public:
explicit EmptyPatternRewriter(const OpBuilder& other_builder)
: mlir::PatternRewriter(other_builder) {}
~EmptyPatternRewriter() override = default;
};
TEST(TfQuantOpTest, applyUniformQuantization) {
MLIRContext context;
OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context)));
OpBuilder builder(&module->getBodyRegion());
context.loadDialect<TF::TensorFlowDialect, quant::QuantizationDialect,
func::FuncDialect>();
EmptyPatternRewriter pattern_rewriter(builder);
Value value = CreateConstValue<float>(builder, module->getLoc(), {1024, 2},
SmallVector<float>(2048, 0));
QuantizationComponentSpec quant_spec;
quant_spec.set_quantization_component(
QuantizationComponentSpec::COMPONENT_WEIGHT);
quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8);
std::optional<TF::PartitionedCallOp> dequantize_op = ApplyUniformQuantization(
pattern_rewriter, cast<TF::ConstOp>(value.getDefiningOp()), quant_spec);
EXPECT_TRUE(dequantize_op.has_value());
EXPECT_EQ(dequantize_op.value().func().getName().str(),
"composite_dequantize_uniform");
}
}
} |
1,220 | cpp | tensorflow/tensorflow | tf_op_quant_spec | tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc | tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_OPS_TF_OP_QUANT_SPEC_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_OPS_TF_OP_QUANT_SPEC_H_
#include <memory>
#include <optional>
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
namespace mlir {
namespace quant {
bool IsOpWithDataMovementTrait(Operation* op);
bool IsOpWithQuantizableTrait(Operation* op);
bool IsOpWithInt8TypeOperand(Operation* op);
bool IsValueWithQuantizablePrecision(Value val);
std::optional<tensorflow::quantization::QuantizationComponentSpec>
GetWeightComponentSpec(
const tensorflow::quantization::QuantizationOptions& quantization_options);
std::unique_ptr<OpQuantSpec> GetTFOpQuantSpec(Operation* op);
std::unique_ptr<OpQuantScaleSpec> GetTfQuantScaleSpec(Operation* op);
}
}
#endif
#include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h"
#include <memory>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace quant {
bool IsOpWithDataMovementTrait(Operation* op) {
return isa<TF::IdentityOp, TF::CastOp, TF::ReshapeOp, TF::XlaShardingOp,
TF::GatherOp, TF::GatherV2Op, TF::XlaGatherOp, TF::ExpandDimsOp,
TF::SqueezeOp, TF::TransposeOp>(op);
}
bool IsOpWithQuantizableTrait(Operation* op) {
return isa<TF::XlaConvV2Op, TF::XlaDotV2Op, TF::MatMulOp, TF::Conv2DOp,
TF::GatherOp, TF::GatherV2Op, TF::XlaGatherOp,
TF::ResourceGatherOp, TF::DepthwiseConv2dNativeOp, TF::Conv3DOp,
TF::BatchMatMulV2Op, TF::EinsumOp>(op);
}
bool IsOpWithInt8TypeOperand(Operation* op) {
return (isa<TF::XlaConvV2Op, TF::XlaDotV2Op, TF::XlaGatherOp, TF::GatherOp,
TF::GatherV2Op>(op));
}
bool IsValueWithQuantizablePrecision(Value val) {
auto type = mlir::dyn_cast<ShapedType>(val.getType());
if (!type) return false;
if (type.getElementType().isF32() || type.getElementType().isBF16())
return true;
return false;
}
std::optional<tensorflow::quantization::QuantizationComponentSpec>
GetWeightComponentSpec(
const tensorflow::quantization::QuantizationOptions& quantization_options) {
for (auto& cur_spec : quantization_options.quantization_method()
.quantization_component_specs()) {
if (cur_spec.quantization_component() ==
tensorflow::quantization::QuantizationComponentSpec::COMPONENT_WEIGHT)
return cur_spec;
}
return std::nullopt;
}
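// Builds the OpQuantSpec for an op. Only composite functions (a
// PartitionedCallOp whose f attribute starts with "composite_") get a
// non-empty spec; the weight operand index and its per-channel dimension are
// keyed off the composite function name: operand 1 along dimension 3 for
// conv2d/depthwise_conv2d, dimension 4 for conv3d, and -1 (per-tensor) for
// matmul/einsum/batch_matmul; gather quantizes operand 0 per-tensor.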
std::unique_ptr<OpQuantSpec> GetTFOpQuantSpec(Operation* op) {
auto spec = std::make_unique<OpQuantSpec>();
if (auto call_op = dyn_cast<TF::PartitionedCallOp>(op)) {
StringRef function_name =
mlir::cast<FlatSymbolRefAttr>(call_op.getFAttr()).getValue();
if (!function_name.starts_with("composite_")) {
return spec;
}
if (function_name.contains("depthwise_conv2d")) {
spec->coeff_op_quant_dim[1] = 3;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("conv2d")) {
spec->coeff_op_quant_dim[1] = 3;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("matmul")) {
spec->coeff_op_quant_dim[1] = -1;
if (function_name.contains("with_bias") ||
function_name.contains("and_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("einsum")) {
spec->coeff_op_quant_dim[1] = -1;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("conv3d")) {
spec->coeff_op_quant_dim[1] = 4;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("batch_matmul")) {
spec->coeff_op_quant_dim[1] = -1;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("gather")) {
spec->coeff_op_quant_dim[0] = -1;
}
for (auto quantizable_operand : spec->coeff_op_quant_dim) {
spec->quantizable_operands.insert(quantizable_operand.first);
}
}
return spec;
}
std::unique_ptr<OpQuantScaleSpec> GetTfQuantScaleSpec(Operation* op) {
auto scale_spec = std::make_unique<OpQuantScaleSpec>();
if (llvm::isa<
TF::AvgPoolOp,
TF::ConcatOp,
TF::ConcatV2Op,
TF::ExpandDimsOp,
TF::IdentityNOp,
TF::IdentityOp,
TF::MaxPoolOp,
TF::PadV2Op,
TF::RankOp,
TF::ReshapeOp,
TF::SelectOp,
TF::SelectV2Op,
TF::ShapeNOp,
TF::ShapeOp,
TF::SizeOp,
TF::SqueezeOp,
TF::TransposeOp
>(op)) {
scale_spec->has_same_scale_requirement = true;
}
return scale_spec;
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h"
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
namespace mlir::quant {
namespace {
using QuantizationOptions = tensorflow::quantization::QuantizationOptions;
using QuantizationComponentSpec =
tensorflow::quantization::QuantizationComponentSpec;
TEST(TfOpQuantSpecTest, WeightComponentSpecExist) {
QuantizationOptions quant_options;
QuantizationComponentSpec quant_spec;
quant_spec.set_quantization_component(
QuantizationComponentSpec::COMPONENT_WEIGHT);
quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8);
auto mutable_quant_method = quant_options.mutable_quantization_method();
*mutable_quant_method->add_quantization_component_specs() = quant_spec;
auto output = GetWeightComponentSpec(quant_options);
EXPECT_TRUE(output.has_value());
}
TEST(TfOpQuantSpecTest, WeightComponentSpecDoNotExist) {
QuantizationOptions quant_options;
auto output = GetWeightComponentSpec(quant_options);
EXPECT_FALSE(output.has_value());
}
}
} |
1,221 | cpp | tensorflow/tensorflow | convert_asset_args | tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.cc | tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_CC_CONVERT_ASSET_ARGS_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_CC_CONVERT_ASSET_ARGS_H_
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace mlir::quant {
FailureOr<SmallVector<tensorflow::AssetFileDef>> ConvertAssetArgs(
ModuleOp module_op);
}
#endif
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h"
#include "absl/algorithm/container.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace mlir::quant {
namespace {
using ::mlir::tf_saved_model::AssetOp;
using ::mlir::tf_saved_model::kTfSavedModelIndexPathAttr;
using ::mlir::tf_saved_model::LookupBoundInputOfType;
using ::tensorflow::AssetFileDef;
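// Returns a new set of argument attributes with `tf_saved_model.bound_input`
// removed and a `tf_saved_model.index_path` attribute wrapping `index_path`
// appended.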
SmallVector<NamedAttribute> ReplaceBoundInputAttrWithIndexPathAttr(
const ArrayRef<NamedAttribute> arg_attrs, const StringRef index_path,
Builder& builder) {
SmallVector<NamedAttribute> new_arg_attrs;
for (auto arg_attr : arg_attrs) {
if (arg_attr.getName() == "tf_saved_model.bound_input") continue;
new_arg_attrs.emplace_back(arg_attr);
}
const NamedAttribute index_path_attr(
builder.getStringAttr(kTfSavedModelIndexPathAttr),
builder.getStrArrayAttr({index_path}));
new_arg_attrs.emplace_back(index_path_attr);
return new_arg_attrs;
}
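// Strips the leading "assets/" directory component from `filename` if
// present; asset paths in an AssetFileDef are relative to the assets
// directory.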
StringRef MaybeStripAssetDirectoryPrefix(const StringRef filename) {
if (filename.find("assets/") == 0) {
return filename.drop_front(7);
} else {
return filename;
}
}
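// Creates an AssetFileDef that maps the asset file `filename` to the input
// tensor named `tensor_name`.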
AssetFileDef CreateAssetFileDef(const StringRef filename,
const StringRef tensor_name) {
AssetFileDef asset_file_def{};
asset_file_def.set_filename(MaybeStripAssetDirectoryPrefix(filename).str());
tensorflow::TensorInfo tensor_info{};
tensor_info.set_name(tensor_name.str());
*asset_file_def.mutable_tensor_info() = tensor_info;
return asset_file_def;
}
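// Returns the comma-separated input names listed in the function's
// `tf.entry_function` attribute.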
SmallVector<StringRef> GetEntryFunctionInputs(func::FuncOp func_op) {
auto entry_function_attr =
func_op->getAttrOfType<DictionaryAttr>("tf.entry_function");
SmallVector<StringRef> inputs;
mlir::dyn_cast_or_null<StringAttr>(entry_function_attr.get("inputs"))
.strref()
.split(inputs, ",");
return inputs;
}
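// Replaces the bound-input attribute of the main function's argument at
// `arg_idx` with an index-path attribute pointing at `index_path`.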
void ConvertMainArgAttrs(func::FuncOp main_func_op, const int arg_idx,
const StringRef index_path) {
const ArrayRef<NamedAttribute> arg_attrs =
main_func_op.getArgAttrDict(arg_idx).getValue();
Builder builder(main_func_op.getContext());
SmallVector<NamedAttribute> new_arg_attrs =
ReplaceBoundInputAttrWithIndexPathAttr(arg_attrs, index_path, builder);
main_func_op.setArgAttrs(arg_idx, new_arg_attrs);
}
}
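// Converts main-function arguments bound to `tf_saved_model.asset` ops into
// index-path arguments and returns one AssetFileDef per converted argument.
// Fails when the module has no main function.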
FailureOr<SmallVector<AssetFileDef>> ConvertAssetArgs(ModuleOp module_op) {
func::FuncOp main_func_op = FindMainFuncOp(module_op);
if (!main_func_op) return failure();
SmallVector<StringRef> input_names = GetEntryFunctionInputs(main_func_op);
SymbolTable symbol_table(module_op);
SmallVector<AssetFileDef> asset_file_defs;
for (BlockArgument argument : main_func_op.getArguments()) {
const int arg_idx = argument.getArgNumber();
auto asset_op =
LookupBoundInputOfType<AssetOp>(main_func_op, arg_idx, symbol_table);
if (!asset_op) continue;
const StringRef input_name = input_names[arg_idx];
ConvertMainArgAttrs(main_func_op, arg_idx, input_name);
asset_file_defs.emplace_back(CreateAssetFileDef(
asset_op.getFilenameAttr(), input_name));
}
return asset_file_defs;
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h"
#include <gmock/gmock.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace mlir::quant {
namespace {
using ::tensorflow::AssetFileDef;
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::SizeIs;
class ConvertAssetArgsTest : public ::testing::Test {
protected:
ConvertAssetArgsTest() {
ctx_.loadDialect<func::FuncDialect, TF::TensorFlowDialect,
tf_saved_model::TensorFlowSavedModelDialect>();
}
OwningOpRef<ModuleOp> ParseModuleOpString(
const absl::string_view module_op_str) {
auto module_op_ref = parseSourceString<ModuleOp>(module_op_str, &ctx_);
EXPECT_TRUE(module_op_ref);
return module_op_ref;
}
mlir::MLIRContext ctx_{};
};
func::FuncOp GetMainFuncOp(ModuleOp module_op) {
for (auto func_op : module_op.getOps<func::FuncOp>()) {
if (func_op.getSymName() == "main") {
return func_op;
}
}
return {};
}
TEST_F(ConvertAssetArgsTest, ConvertsSingleAssetArg) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
module {
"tf_saved_model.asset"() {filename = "assets/file_0.txt", sym_name = "__tf_saved_model_asset0"} : () -> ()
func.func @main(%arg_0: tensor<!tf_type.string> {tf_saved_model.bound_input = @__tf_saved_model_asset0}) -> () attributes {tf.entry_function = {inputs = "arg_0:0", outputs = ""}} {
return
}
}
)mlir");
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
ConvertAssetArgs(*module_op);
EXPECT_TRUE(succeeded(asset_file_defs));
EXPECT_THAT(*asset_file_defs, SizeIs(1));
const AssetFileDef& asset_file_def = *asset_file_defs->begin();
EXPECT_THAT(asset_file_def.filename(), Eq("file_0.txt"));
EXPECT_THAT(asset_file_def.tensor_info().name(), Eq("arg_0:0"));
func::FuncOp main_func_op = GetMainFuncOp(*module_op);
DictionaryAttr arg_attrs = main_func_op.getArgAttrDict(0);
EXPECT_THAT(arg_attrs.get("tf_saved_model.bound_input"), IsNull());
const ArrayRef<Attribute> index_path_attrs =
mlir::cast<ArrayAttr>(arg_attrs.get("tf_saved_model.index_path"))
.getValue();
EXPECT_THAT(index_path_attrs, SizeIs(1));
StringAttr index_path =
mlir::dyn_cast_or_null<StringAttr>(index_path_attrs[0]);
EXPECT_THAT(index_path, NotNull());
EXPECT_THAT(index_path, Eq("arg_0:0"));
}
TEST_F(ConvertAssetArgsTest, NonBoundedArgsNotModified) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
module {
func.func @main(%arg_0: tensor<!tf_type.string> {tf_saved_model.index_path = ["arg_0:0"]}) -> () attributes {tf.entry_function = {inputs = "arg_0:0", outputs = ""}} {
return
}
}
)mlir");
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
ConvertAssetArgs(*module_op);
EXPECT_TRUE(succeeded(asset_file_defs));
EXPECT_THAT(*asset_file_defs, IsEmpty());
func::FuncOp main_func_op = GetMainFuncOp(*module_op);
DictionaryAttr arg_attrs = main_func_op.getArgAttrDict(0);
EXPECT_THAT(arg_attrs.get("tf_saved_model.bound_input"), IsNull());
const ArrayRef<Attribute> index_path_attrs =
mlir::cast<ArrayAttr>(arg_attrs.get("tf_saved_model.index_path"))
.getValue();
EXPECT_THAT(index_path_attrs, SizeIs(1));
StringAttr index_path =
mlir::dyn_cast_or_null<StringAttr>(index_path_attrs[0]);
EXPECT_THAT(index_path, NotNull());
EXPECT_THAT(index_path, Eq("arg_0:0"));
}
TEST_F(ConvertAssetArgsTest, ArgsBoundedToGlobalTensorNotModified) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
module {
"tf_saved_model.global_tensor"() {type = tensor<2xi32>, value = dense<2> : tensor<2xi32>, sym_name = "__tf_saved_model_x"} : () -> ()
func.func @main(%arg_0: tensor<!tf_type.resource<tensor<2xi32>>> {tf_saved_model.bound_input = @__tf_saved_model_x}) -> () attributes {tf.entry_function = {inputs = "arg_0:0", outputs = ""}} {
return
}
}
)mlir");
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
ConvertAssetArgs(*module_op);
EXPECT_TRUE(succeeded(asset_file_defs));
EXPECT_THAT(*asset_file_defs, IsEmpty());
func::FuncOp main_func_op = GetMainFuncOp(*module_op);
DictionaryAttr arg_attrs = main_func_op.getArgAttrDict(0);
EXPECT_THAT(arg_attrs.get("tf_saved_model.bound_input"), NotNull());
}
TEST_F(ConvertAssetArgsTest, FailsWhenNoMain) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(module {})mlir");
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
ConvertAssetArgs(*module_op);
EXPECT_TRUE(failed(asset_file_defs));
}
}
} |
1,222 | cpp | tensorflow/tensorflow | constant_fold | tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.cc | tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_CONSTANT_FOLD_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_CONSTANT_FOLD_H_
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Operation.h"
#include "mlir/Support/LogicalResult.h"
namespace mlir {
namespace TF {
LogicalResult ConstantFoldFallbackHook(
Operation *inst, ArrayRef<Attribute> operands,
SmallVectorImpl<OpFoldResult> &results);
}
}
#endif
#include "tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.h"
#include <algorithm>
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/constant_fold_utils.h"
#include "tensorflow/core/platform/mutex.h"
namespace mlir {
namespace TF {
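// Heuristic that decides whether folding is worthwhile: operands must not be
// too large, and the results must either have unknown shape, fit under a
// fixed size threshold, or be at most kSizeFactor times the operand size.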
static bool IsFoldedByDefaultPolicy(Operation* inst) {
bool has_unknown_shape = false;
auto get_size = [&](TypeRange types) {
int64_t size = 0;
for (auto t : types) {
auto tensor_type = mlir::cast<TensorType>(t);
if (!tensor_type.getElementType().isIntOrFloat()) continue;
if (!tensor_type.hasStaticShape()) {
has_unknown_shape = true;
return size;
}
size += tensor_type.getNumElements() *
tensor_type.getElementType().getIntOrFloatBitWidth();
}
return size;
};
int64_t results_size = get_size(inst->getResultTypes());
int64_t operands_size = get_size(inst->getOperandTypes());
constexpr int kSizeFactor = 2;
#ifdef TF_DISABLE_CONSTANT_FOLDING
constexpr int64_t kResultsSizeThreshold = 0;
#else
constexpr int64_t kResultsSizeThreshold = (1 << 23);
#endif
constexpr int64_t kOperandsSizeThreshold = (1 << 30);
return (operands_size <= kOperandsSizeThreshold) &&
(has_unknown_shape || (results_size <= kResultsSizeThreshold) ||
(results_size <= kSizeFactor * operands_size));
}
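// Fallback constant-folding hook registered with the TF dialect. Ops whose
// numerical results are statically empty fold to empty DenseElementsAttrs;
// otherwise all operands must be ElementsAttr constants and the op is
// evaluated with EvaluateOperation.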
LogicalResult ConstantFoldFallbackHook(
Operation* inst, ArrayRef<Attribute> operands,
SmallVectorImpl<OpFoldResult>& results) {
if (!CanBeFolded(inst)) return failure();
if (!IsFoldedByDefaultPolicy(inst)) return failure();
bool has_empty_numerical_results =
llvm::all_of(inst->getResultTypes(), [](Type ty) {
ShapedType shaped_ty = mlir::cast<ShapedType>(ty);
Type element_ty = shaped_ty.getElementType();
return shaped_ty.hasStaticShape() && shaped_ty.getNumElements() == 0 &&
element_ty.isIntOrFloat();
});
  if (has_empty_numerical_results && inst->isRegistered()) {
for (Type ty : inst->getResultTypes()) {
auto shaped_ty = mlir::cast<ShapedType>(ty);
results.push_back(
DenseElementsAttr::get(shaped_ty, llvm::ArrayRef<Attribute>()));
}
return success();
}
if (std::any_of(operands.begin(), operands.end(), [](Attribute attr) {
return !attr || !mlir::isa<ElementsAttr>(attr);
}))
return failure();
SmallVector<ElementsAttr, 4> inputs;
inputs.reserve(operands.size());
for (auto input : operands) {
inputs.push_back(mlir::cast<ElementsAttr>(input));
}
SmallVector<Attribute> constants;
LogicalResult status = EvaluateOperation(inst, inputs, constants);
results.assign(constants.begin(), constants.end());
return status;
}
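// Registers the hook with the TF dialect during static initialization.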
static bool init_hooks = ([] () {
TensorFlowDialect::RegisterConstantFoldHook(ConstantFoldFallbackHook);
}(), true);
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold.h"
#include <utility>
#include <gmock/gmock.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace quant {
namespace {
using ::testing::NotNull;
using ::testing::SizeIs;
using ConstantFoldingTest = ::mlir::quant::QuantizationTestBase;
TEST_F(ConstantFoldingTest, FoldLargeConstant) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @test_fold_constant() -> (tensor<1024x24x24x3xf32>) {
%zp = "tf.Const"() {value = dense<2> : tensor<i32>} : () -> tensor<i32>
%scale = "tf.Const"() {value = dense<2.0> : tensor<f32>} : () -> tensor<f32>
%weight = "tf.Const"() {value = dense<1> : tensor<1024x24x24x3xi8>} : () -> tensor<1024x24x24x3xi8>
%input_i32 = "tf.Cast"(%weight) : (tensor<1024x24x24x3xi8>) -> tensor<1024x24x24x3xi32>
%output = "tf.Sub"(%input_i32, %zp) : (tensor<1024x24x24x3xi32>, tensor<i32>) -> tensor<1024x24x24x3xi32>
%cast = "tf.Cast"(%output) : (tensor<1024x24x24x3xi32>) -> tensor<1024x24x24x3xf32>
%mul = "tf.Mul"(%cast, %scale) : (tensor<1024x24x24x3xf32>, tensor<f32>) -> tensor<1024x24x24x3xf32>
func.return %mul : tensor<1024x24x24x3xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
const auto test_func =
module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
ASSERT_THAT(test_func, NotNull());
Operation* mul_op = FindOperationOfType<TF::MulOp>(test_func);
SmallVector<Value> results = ConstantFoldOpIfPossible(mul_op);
EXPECT_THAT(results, SizeIs(1));
EXPECT_TRUE(isa<TF::ConstOp>(results[0].getDefiningOp()));
}
TEST_F(ConstantFoldingTest, NotFoldingIdentity) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @test_fold_constant() -> (tensor<1024x24x24x3xf32>) {
%zp = "tf.Const"() {value = dense<2> : tensor<i32>} : () -> tensor<i32>
%scale = "tf.Const"() {value = dense<2.0> : tensor<f32>} : () -> tensor<f32>
%weight = "tf.Const"() {value = dense<1> : tensor<1024x24x24x3xi8>} : () -> tensor<1024x24x24x3xi8>
%input_i32 = "tf.Cast"(%weight) : (tensor<1024x24x24x3xi8>) -> tensor<1024x24x24x3xi32>
%output = "tf.Sub"(%input_i32, %zp) : (tensor<1024x24x24x3xi32>, tensor<i32>) -> tensor<1024x24x24x3xi32>
%cast = "tf.Cast"(%output) : (tensor<1024x24x24x3xi32>) -> tensor<1024x24x24x3xf32>
%identity = "tf.Identity"(%scale) : (tensor<f32>) -> tensor<f32>
%mul = "tf.Mul"(%cast, %identity) : (tensor<1024x24x24x3xf32>, tensor<f32>) -> tensor<1024x24x24x3xf32>
func.return %mul : tensor<1024x24x24x3xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
const auto test_func =
module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
ASSERT_THAT(test_func, NotNull());
Operation* op_to_fold = FindOperationOfType<TF::MulOp>(test_func);
SmallVector<Value> results = ConstantFoldOpIfPossible(op_to_fold);
EXPECT_THAT(results, SizeIs(1));
auto mul_op = dyn_cast_or_null<TF::MulOp>(results[0].getDefiningOp());
EXPECT_THAT(mul_op, NotNull());
EXPECT_TRUE(isa<TF::CastOp>(mul_op.getX().getDefiningOp()));
}
TEST_F(ConstantFoldingTest, NotFoldingArgument) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @test_fold_constant(%arg0: tensor<f32>) -> (tensor<1024x24x24x3xf32>) {
%zp = "tf.Const"() {value = dense<2> : tensor<i32>} : () -> tensor<i32>
%weight = "tf.Const"() {value = dense<1> : tensor<1024x24x24x3xi8>} : () -> tensor<1024x24x24x3xi8>
%input_i32 = "tf.Cast"(%weight) : (tensor<1024x24x24x3xi8>) -> tensor<1024x24x24x3xi32>
%output = "tf.Sub"(%input_i32, %zp) : (tensor<1024x24x24x3xi32>, tensor<i32>) -> tensor<1024x24x24x3xi32>
%cast = "tf.Cast"(%output) : (tensor<1024x24x24x3xi32>) -> tensor<1024x24x24x3xf32>
%mul = "tf.Mul"(%cast, %arg0) : (tensor<1024x24x24x3xf32>, tensor<f32>) -> tensor<1024x24x24x3xf32>
func.return %mul : tensor<1024x24x24x3xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
const auto test_func =
module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
ASSERT_THAT(test_func, NotNull());
Operation* op_to_fold = FindOperationOfType<TF::MulOp>(test_func);
SmallVector<Value> results = ConstantFoldOpIfPossible(op_to_fold);
EXPECT_THAT(results, SizeIs(1));
TF::MulOp mul_op = dyn_cast_or_null<TF::MulOp>(results[0].getDefiningOp());
EXPECT_THAT(mul_op, NotNull());
EXPECT_TRUE(isa<TF::CastOp>(mul_op.getX().getDefiningOp()));
}
TEST_F(ConstantFoldingTest, FoldDepthwiseConvWeight) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @test_fold_constant(%arg0: tensor<*xf32>) -> (tensor<?x?x?x3xf32>) {
%cst = "tf.Const"() {value = dense<2.000000e+00> : tensor<2x3x3x1xf32>} : () -> tensor<2x3x3x1xf32>
%cst_0 = "tf.Const"() {value = dense<0.400000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
%cst_1 = "tf.Const"() {value = dense<0.500000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
%cst_2 = "tf.Const"() {value = dense<3.0> : tensor<f32>} : () -> tensor<f32>
%w = "tf.Mul"(%cst, %cst_2) : (tensor<2x3x3x1xf32>, tensor<f32>) -> tensor<2x3x3x1xf32>
%0 = "tf.DepthwiseConv2dNative"(%arg0, %w) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 1, 1]} : (tensor<*xf32>, tensor<2x3x3x1xf32>) -> tensor<?x?x?x3xf32>
%1 = "tf.BiasAdd"(%0, %cst_0) {data_format = "NHWC"} : (tensor<?x?x?x3xf32>, tensor<3xf32>) -> tensor<?x?x?x3xf32>
%2 = "tf.Mul"(%1, %cst_1) : (tensor<?x?x?x3xf32>, tensor<3xf32>) -> tensor<?x?x?x3xf32>
func.return %2 : tensor<?x?x?x3xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
const auto test_func =
module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
ASSERT_THAT(test_func, NotNull());
RewritePatternSet patterns(ctx_.get());
patterns.add<ConstantFoldQuantizableOperands>(ctx_.get());
EXPECT_TRUE(
succeeded(applyPatternsAndFoldGreedily(test_func, std::move(patterns))));
auto depthwise_conv_op =
FindOperationOfType<TF::DepthwiseConv2dNativeOp>(test_func);
EXPECT_THAT(depthwise_conv_op, NotNull());
EXPECT_TRUE(isa<TF::ConstOp>(depthwise_conv_op.getFilter().getDefiningOp()));
}
TEST_F(ConstantFoldingTest, DepthwiseConvWeightNotFoldable) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @test_fold_constant(%arg0: tensor<*xf32>, %arg1: tensor<f32>) -> (tensor<?x?x?x3xf32>) {
%cst = "tf.Const"() {value = dense<2.000000e+00> : tensor<2x3x3x1xf32>} : () -> tensor<2x3x3x1xf32>
%cst_0 = "tf.Const"() {value = dense<0.400000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
%cst_1 = "tf.Const"() {value = dense<0.500000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
%w = "tf.Mul"(%cst, %arg1) : (tensor<2x3x3x1xf32>, tensor<f32>) -> tensor<2x3x3x1xf32>
%0 = "tf.DepthwiseConv2dNative"(%arg0, %w) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 1, 1]} : (tensor<*xf32>, tensor<2x3x3x1xf32>) -> tensor<?x?x?x3xf32>
%1 = "tf.BiasAdd"(%0, %cst_0) {data_format = "NHWC"} : (tensor<?x?x?x3xf32>, tensor<3xf32>) -> tensor<?x?x?x3xf32>
%2 = "tf.Mul"(%1, %cst_1) : (tensor<?x?x?x3xf32>, tensor<3xf32>) -> tensor<?x?x?x3xf32>
func.return %2 : tensor<?x?x?x3xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
const auto test_func =
module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
ASSERT_THAT(test_func, NotNull());
RewritePatternSet patterns(ctx_.get());
patterns.add<ConstantFoldQuantizableOperands>(ctx_.get());
EXPECT_TRUE(
succeeded(applyPatternsAndFoldGreedily(test_func, std::move(patterns))));
auto depthwise_conv_op =
FindOperationOfType<TF::DepthwiseConv2dNativeOp>(test_func);
EXPECT_THAT(depthwise_conv_op, NotNull());
EXPECT_TRUE(isa<TF::MulOp>(depthwise_conv_op.getFilter().getDefiningOp()));
}
}
}
} |
1,223 | cpp | tensorflow/tensorflow | const_op_size | tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.cc | tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_CC_CONST_OP_SIZE_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_CC_CONST_OP_SIZE_H_
#include <cstdint>
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace quant {
int64_t GetSizeInBytes(TF::ConstOp const_op);
}
}
#endif
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.h"
#include <climits>
#include "absl/algorithm/container.h"  // For absl::c_accumulate.
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
namespace mlir {
namespace quant {
namespace {
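// Bytes assumed per element when the element type has no known bit width.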
constexpr int64_t kAssumedNumBytesPerElem = 4;
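// Returns the size in bytes of a constant with an int or float element type,
// derived from the element bit width and the element count.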
int64_t GetSizeOfIntOrFloatConst(TF::ConstOp const_op) {
const Type dtype = const_op.getDtype();
const ElementsAttr const_value = const_op.getValue();
const auto bytes_per_elem =
static_cast<int64_t>(dtype.getIntOrFloatBitWidth() / CHAR_BIT);
return bytes_per_elem * const_value.getNumElements();
}
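// Returns the size in bytes of a string constant: the sum of the sizes of all
// string values it holds.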
int64_t GetSizeOfStringConst(TF::ConstOp const_op) {
const ElementsAttr const_value = const_op.getValue();
const auto str_attr = cast<DenseStringElementsAttr>(const_value);
return absl::c_accumulate(
str_attr.getRawStringData(), 0,
[](int64_t acc, const StringRef str_value) -> int64_t {
return acc + str_value.size();
});
}
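// Estimates the size of a constant whose element type is neither int/float
// nor string, assuming kAssumedNumBytesPerElem bytes per element.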
int64_t GetSizeOfUnsupportedTypeConst(TF::ConstOp const_op) {
return kAssumedNumBytesPerElem * const_op.getValue().getNumElements();
}
}
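// Returns the size in bytes of `const_op`, dispatching on its element type.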
int64_t GetSizeInBytes(TF::ConstOp const_op) {
const Type dtype = const_op.getDtype();
if (dtype.isIntOrFloat()) {
return GetSizeOfIntOrFloatConst(const_op);
} else if (isa<TF::StringType>(dtype)) {
return GetSizeOfStringConst(const_op);
} else {
return GetSizeOfUnsupportedTypeConst(const_op);
}
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Types.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace quant {
namespace {
using ::testing::Eq;
class GetSizeInBytesTest : public ::testing::Test {
protected:
GetSizeInBytesTest() : ctx_() { ctx_.loadDialect<TF::TensorFlowDialect>(); }
MLIRContext ctx_;
};
TF::ConstOp ParseConstOp(const absl::string_view const_op_str, Block& block,
MLIRContext& ctx) {
const LogicalResult parse_result =
parseSourceString(const_op_str, &block, ParserConfig(&ctx));
EXPECT_TRUE(succeeded(parse_result));
auto const_op = dyn_cast_or_null<TF::ConstOp>(block.front());
EXPECT_TRUE(const_op);
return const_op;
}
TEST_F(GetSizeInBytesTest, Int32ScalarConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(4));
}
TEST_F(GetSizeInBytesTest, Int32ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<1> : tensor<2xi32>} : () -> tensor<2xi32>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(8));
}
TEST_F(GetSizeInBytesTest, Int8ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<2> : tensor<3xi8>} : () -> tensor<3xi8>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(3));
}
TEST_F(GetSizeInBytesTest, Float32ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<3.0> : tensor<4xf32>} : () -> tensor<4xf32>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(16));
}
TEST_F(GetSizeInBytesTest, Float64ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<3.0> : tensor<2xf64>} : () -> tensor<2xf64>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(16));
}
TEST_F(GetSizeInBytesTest, Bfloat16ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr = R"mlir(
%cst = "tf.Const"() {value = dense<1.0> : tensor<7xbf16>} : () -> tensor<7xbf16>
)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(14));
}
TEST_F(GetSizeInBytesTest, TfStringConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr = R"mlir(
%cst = "tf.Const"() {value = dense<["Hello World", "Quantization"]> : tensor<2x!tf_type.string>} : () -> tensor<2x!tf_type.string>
)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(23));
}
TEST_F(GetSizeInBytesTest, ConstOpWithUnknownSizeAssumes4BytesPerElement) {
constexpr absl::string_view kConstOpExpr = R"mlir(
%cst = "tf.Const"() {value = #tf_type<tensor_proto : "0xDEADBAAD"> : tensor<!tf_type.variant>} : () -> tensor<!tf_type.variant>
)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(4));
}
}
}
} |
1,224 | cpp | tensorflow/tensorflow | save_variables | tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.cc | tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_CC_SAVE_VARIABLES_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_CC_SAVE_VARIABLES_H_
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
namespace tensorflow {
namespace quantization {
absl::StatusOr<std::vector<std::string>> SaveVariablesToCheckpoint(
absl::string_view prefix, mlir::ModuleOp module_op);
}
}
#endif
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/ir/importexport/convert_tensor.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace quantization {
namespace {
using ::mlir::func::FuncOp;
using ::mlir::tf_saved_model::GetInitializerFunction;
using ::mlir::tf_saved_model::kTfSavedModelInitializerRestoreType;
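// Adds the tensor that `assign_var_op` assigns to `bundle_writer`, keyed by
// the variable's shared name. Returns the shared name on success, an empty
// string when the op does not match the VarHandleOp + ConstOp pattern, or an
// error status when tensor conversion or writing fails.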
absl::StatusOr<std::string> AddTensorToBundleWriter(
mlir::TF::AssignVariableOp assign_var_op, BundleWriter& bundle_writer) {
auto resource_operand = assign_var_op.getOperand(0);
auto var_handle_op =
llvm::dyn_cast<mlir::TF::VarHandleOp>(resource_operand.getDefiningOp());
if (!var_handle_op) {
assign_var_op->emitRemark(
"Operand idx 0 is not a tf.VarHandleOp. The initializing tensor is not "
"saved to checkpoint.");
return "";
}
auto assigned_value_operand = assign_var_op.getOperand(1);
auto const_op =
llvm::dyn_cast<mlir::TF::ConstOp>(assigned_value_operand.getDefiningOp());
if (!const_op) {
assign_var_op->emitRemark(
"Operand idx 1 is not a tf.ConstOp. The initializing tensor is not "
"saved to checkpoint.");
return "";
}
Tensor const_tensor{};
if (const absl::Status status = mlir::tfg::ConvertToTensor(
const_op.getValue(), &const_tensor);
!status.ok()) {
return status;
}
if (!bundle_writer.Add(var_handle_op.getSharedName(), const_tensor)
.ok()) {
return bundle_writer.status();
}
return var_handle_op.getSharedName().str();
}
}
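// Saves the variables initialized in the "restore_op" session initializer
// function to a checkpoint at `prefix` and returns the shared names of the
// saved variables.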
absl::StatusOr<std::vector<std::string>> SaveVariablesToCheckpoint(
const absl::string_view prefix, mlir::ModuleOp module_op) {
FuncOp session_init_func_type_restore_op = GetInitializerFunction(
module_op, kTfSavedModelInitializerRestoreType);
if (!session_init_func_type_restore_op) {
LOG(INFO) << "No session initializer function with type 'restore_op'. No "
"variables are saved to checkpoint.";
return std::vector<std::string>{};
}
BundleWriter bundle_writer(Env::Default(), prefix);
if (!bundle_writer.status().ok()) {
return bundle_writer.status();
}
std::vector<std::string> saved_variable_shared_names;
for (auto assign_variable_op :
session_init_func_type_restore_op.getOps<mlir::TF::AssignVariableOp>()) {
if (const absl::StatusOr<std::string> variable_shared_name =
AddTensorToBundleWriter(assign_variable_op, bundle_writer);
!variable_shared_name.ok()) {
return variable_shared_name.status();
} else if (!variable_shared_name->empty()) {
saved_variable_shared_names.emplace_back(
std::move(*variable_shared_name));
VLOG(1) << "Saved a variable with shared_name: " << *variable_shared_name;
}
}
if (saved_variable_shared_names.empty()) {
LOG(INFO) << "No variables are saved to checkpoint";
return saved_variable_shared_names;
}
if (!bundle_writer.Finish().ok()) {
return bundle_writer.status();
}
return saved_variable_shared_names;
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.h"
#include <string>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace quantization {
namespace {
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::ExpectEqual;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::IsOk;
class SaveVariablesToCheckpointTest : public ::testing::Test {
protected:
SaveVariablesToCheckpointTest() : env_(Env::Default()) {
ctx_.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect,
mlir::tf_saved_model::TensorFlowSavedModelDialect>();
}
absl::StatusOr<std::string> MakeTempDir() {
std::string tmp_dir{};
if (!env_->LocalTempFilename(&tmp_dir)) {
return absl::InternalError("Failed to create temp file.");
}
TF_CHECK_OK(env_->CreateDir(tmp_dir));
return tmp_dir;
}
mlir::OwningOpRef<mlir::ModuleOp> ParseModuleOpString(
const absl::string_view module_op_str) {
auto module_op_ref =
mlir::parseSourceString<mlir::ModuleOp>(module_op_str, &ctx_);
EXPECT_TRUE(module_op_ref);
return module_op_ref;
}
Env* env_{};
mlir::MLIRContext ctx_{};
};
TEST_F(SaveVariablesToCheckpointTest, VariableSavedToCheckpoint) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["restore"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%0 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%0, %cst) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, UnorderedElementsAre("var_0"));
BundleReader bundle_reader(env_, *checkpoint_prefix);
Tensor loaded_tensor{};
EXPECT_TRUE(bundle_reader.Lookup("var_0", &loaded_tensor).ok());
ExpectEqual(loaded_tensor, AsTensor<float>({1.0, 2.0}));
}
TEST_F(SaveVariablesToCheckpointTest, MultipleVariablesSavedToCheckpoint) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["restore"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%0 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%0, %cst) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
%cst_0 = "tf.Const"() {device = "", value = dense<[3, 4, 5, 6]> : tensor<4xi32>} : () -> tensor<4xi32>
%1 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_1"} : () -> tensor<!tf_type.resource<tensor<4xi32>>>
"tf.AssignVariableOp"(%1, %cst_0) : (tensor<!tf_type.resource<tensor<4xi32>>>, tensor<4xi32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, UnorderedElementsAre("var_0", "var_1"));
BundleReader bundle_reader(env_, *checkpoint_prefix);
Tensor loaded_var_0{};
EXPECT_TRUE(bundle_reader.Lookup("var_0", &loaded_var_0).ok());
ExpectEqual(loaded_var_0, AsTensor<float>({1.0, 2.0}));
Tensor loaded_var_1{};
EXPECT_TRUE(bundle_reader.Lookup("var_1", &loaded_var_1).ok());
ExpectEqual(loaded_var_1, AsTensor<int>({3, 4, 5, 6}));
}
TEST_F(SaveVariablesToCheckpointTest,
NoVariablesSavedWhenNoInitializerFunction) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = []} : () -> ()
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, IsEmpty());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest,
NoVariablesSavedWhenNoSessionInitializerOp) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @my_func() -> () {
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
EXPECT_TRUE(
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref).ok());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest,
NoVariablesSavedWhenNoSessionInitializerOpTypeRestoreOp) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_init_op]} : () -> ()
func.func @init_func_init_op() -> () attributes {tf_saved_model.exported_names = ["init"], tf_saved_model.initializer_type = "init_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%0 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%0, %cst) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, IsEmpty());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest, MutableVariablesNotSaved) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["init"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%add = "tf.AddV2"(%cst, %cst) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
%var_handle = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%var_handle, %add) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, IsEmpty());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest,
VariableNotSavedWhenNonVarHandleOpOperandForAssignVariableOp) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["init"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%var_handle = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
%var_handle_cast = "tf.Cast"(%var_handle) : (tensor<!tf_type.resource<tensor<2xf32>>>) -> tensor<!tf_type.resource>
"tf.AssignVariableOp"(%var_handle_cast, %cst) : (tensor<!tf_type.resource>, tensor<2xf32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, IsEmpty());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest, FailsWhenDuplicateSharedName) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["restore"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%0 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%0, %cst) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
%cst_0 = "tf.Const"() {device = "", value = dense<[3, 4, 5, 6]> : tensor<4xi32>} : () -> tensor<4xi32>
%1 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<4xi32>>>
"tf.AssignVariableOp"(%1, %cst_0) : (tensor<!tf_type.resource<tensor<4xi32>>>, tensor<4xi32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
EXPECT_FALSE(
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref).ok());
}
}
}
} |
1,225 | cpp | tensorflow/tensorflow | tfr_decompose_ctx | tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx.cc | tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TFR_INTEGRATION_TFR_DECOMPOSE_CTX_H_
#define TENSORFLOW_COMPILER_MLIR_TFR_INTEGRATION_TFR_DECOMPOSE_CTX_H_
#include "absl/status/statusor.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfr {
extern const char* const kTFRLibEnv;
using tsl::StatusOr;
class TFRDecomposeContext {
public:
static absl::StatusOr<std::unique_ptr<TFRDecomposeContext>> Get(
mlir::MLIRContext* mlir_ctx);
explicit TFRDecomposeContext(mlir::ModuleOp tfr_module);
static std::unique_ptr<TFRDecomposeContext> GetFromText(
StringPiece tfr_raw_text, mlir::MLIRContext* mlir_ctx);
absl::StatusOr<FunctionDef> ExpandNode(const NodeDef& node_def,
StringPiece func_name);
Status DecomposeGraph(mlir::ModuleOp user_module);
void Destroy();
private:
mlir::ModuleOp tfr_module_;
mlir::PassManager pm_;
GraphExportConfig export_confs_;
};
absl::StatusOr<FunctionDef> ExpandNode(const NodeDef& node_def,
StringPiece func_name);
Status DecomposeGraph(mlir::ModuleOp user_module);
}
}
#endif
#include "tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx.h"
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Verifier.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_attr.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/compiler/mlir/tfr/ir/tfr_ops.h"
#include "tensorflow/compiler/mlir/tfr/passes/passes.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/env_var.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfr {
const char* const kTFRLibEnv = "TF_MLIR_TFR_LIB_DIR";
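// Loads the TFR decomposition library from the directory named by the
// TF_MLIR_TFR_LIB_DIR environment variable (defaulting to the resources
// directory under runfiles) and builds a decompose context from its .mlir
// files.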
absl::StatusOr<std::unique_ptr<TFRDecomposeContext>> TFRDecomposeContext::Get(
mlir::MLIRContext* mlir_ctx) {
Env* env = Env::Default();
std::string tfr_lib_dir;
TF_RETURN_IF_ERROR(ReadStringFromEnvVar(
kTFRLibEnv, "tensorflow/compiler/mlir/tfr/resources", &tfr_lib_dir));
string composite_mlir_dir = io::JoinPath(env->GetRunfilesDir(), tfr_lib_dir);
std::vector<string> files;
TF_RETURN_IF_ERROR(env->GetChildren(composite_mlir_dir, &files));
if (files.empty()) {
return errors::Internal(absl::StrCat(
"Failed to find the decomposition lib from path ", composite_mlir_dir));
}
std::string tfr_raw_text;
for (const auto& file : files) {
string fullpath = io::JoinPath(composite_mlir_dir, file);
if (env->MatchPath(fullpath, io::JoinPath(composite_mlir_dir, "*.mlir"))) {
std::string text;
TF_RETURN_IF_ERROR(ReadFileToString(env, fullpath, &text));
tfr_raw_text.append(text);
}
}
auto ctx = TFRDecomposeContext::GetFromText(tfr_raw_text, mlir_ctx);
if (!ctx) {
return errors::Internal(absl::StrCat(
"Failed to load the imported decomposition lib: ", tfr_raw_text));
}
return ctx;
}
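// Builds a decompose context by registering the required dialects and parsing
// `tfr_raw_text` as an MLIR module.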
std::unique_ptr<TFRDecomposeContext> TFRDecomposeContext::GetFromText(
StringPiece tfr_raw_text, mlir::MLIRContext* mlir_ctx) {
mlir_ctx->allowUnregisteredDialects(true);
mlir::DialectRegistry registry;
registry.insert<mlir::arith::ArithDialect,
mlir::func::FuncDialect,
mlir::scf::SCFDialect,
mlir::shape::ShapeDialect,
mlir::TF::TensorFlowDialect,
mlir::tf_device::TensorFlowDeviceDialect,
mlir::tf_executor::TensorFlowExecutorDialect,
mlir::TFR::TFRDialect>();
mlir::func::registerAllExtensions(registry);
mlir_ctx->appendDialectRegistry(registry);
mlir_ctx->loadAllAvailableDialects();
auto memory_buffer = llvm::MemoryBuffer::getMemBuffer(
llvm::StringRef(tfr_raw_text.data(), tfr_raw_text.size()));
llvm::SourceMgr source_mgr;
source_mgr.AddNewSourceBuffer(std::move(memory_buffer), llvm::SMLoc());
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceFile<mlir::ModuleOp>(source_mgr, mlir_ctx);
auto module_op = module.release();
return std::make_unique<TFRDecomposeContext>(module_op);
}
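// Expands `node_def` into a FunctionDef named `func_name`: the node is
// wrapped in a temporary MLIR function with unranked tensor operands and
// results, the decompose pipeline is run on it, and the result is exported
// back to a FunctionDef.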
absl::StatusOr<FunctionDef> TFRDecomposeContext::ExpandNode(
const NodeDef& node_def, StringPiece func_name) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef(node_def.op(), &op_def));
DataTypeVector input_dtys, output_dtys;
TF_RETURN_IF_ERROR(InputTypesForNode(node_def, *op_def, &input_dtys));
TF_RETURN_IF_ERROR(OutputTypesForNode(node_def, *op_def, &output_dtys));
mlir::MLIRContext* context = tfr_module_.getContext();
llvm::SmallVector<mlir::Type, 4> input_tys, output_tys;
mlir::Builder builder(context);
for (auto ty : input_dtys) {
mlir::Type elt_ty;
TF_RETURN_IF_ERROR(ConvertDataType(ty, builder, &elt_ty));
mlir::TensorType mlir_ty = mlir::UnrankedTensorType::get(elt_ty);
input_tys.push_back(mlir_ty);
}
for (auto ty : output_dtys) {
mlir::Type elt_ty;
TF_RETURN_IF_ERROR(ConvertDataType(ty, builder, &elt_ty));
mlir::TensorType mlir_ty = mlir::UnrankedTensorType::get(elt_ty);
output_tys.push_back(mlir_ty);
}
llvm::SmallVector<mlir::NamedAttribute, 4> attrs;
for (const auto& attr : node_def.attr()) {
TF_ASSIGN_OR_RETURN(auto mlir_attr,
ConvertAttributeValue(attr.second, &builder));
attrs.push_back({mlir::StringAttr::get(context, attr.first), mlir_attr});
}
mlir::Location loc = mlir::UnknownLoc::get(context);
mlir::ModuleOp module = mlir::ModuleOp::create(loc);
mlir::FunctionType func_type =
mlir::FunctionType::get(context, input_tys, output_tys);
llvm::StringRef func_name_str(func_name.data(), func_name.size());
auto func = mlir::func::FuncOp::create(loc, func_name_str, func_type, {});
module.push_back(func);
func.addEntryBlock();
mlir::OpBuilder op_builder(func.getBody());
const std::string tf_op_full_name = absl::StrCat("tf.", node_def.op());
mlir::OperationState op_state(loc, tf_op_full_name);
op_state.addOperands(func.getArguments());
op_state.addTypes(output_tys);
op_state.addAttributes(attrs);
mlir::Operation* tf_op = op_builder.create(op_state);
op_builder.create<mlir::func::ReturnOp>(loc, tf_op->getResults());
TF_RETURN_IF_ERROR(DecomposeGraph(module));
FunctionDef func_def;
TF_RETURN_IF_ERROR(
tensorflow::tf2xla::v2::ConvertMlirFunctionToFunctionLibraryDef(
func, export_confs_, &func_def));
module.erase();
return func_def;
}
Status TFRDecomposeContext::DecomposeGraph(mlir::ModuleOp user_module) {
if (failed(pm_.run(user_module))) {
return errors::Internal("Failed to run the decompose passes.");
}
return absl::OkStatus();
}
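// Builds the decompose pipeline: functionalize the executor dialect,
// decompose composite TF ops using the TFR library, raise the result back to
// TF ops, then restore executor form and break up islands.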
TFRDecomposeContext::TFRDecomposeContext(mlir::ModuleOp tfr_module)
: tfr_module_(tfr_module), pm_(tfr_module_.getContext()) {
mlir::OpPassManager& func_pm = pm_.nest<mlir::func::FuncOp>();
func_pm.addPass(mlir::CreateExecutorDialectToFunctionalConversionPass());
func_pm.addPass(mlir::TFR::CreateDecomposeTFOpsPass(tfr_module_));
func_pm.addPass(mlir::TFR::CreateRaiseToTFOpsPass(
      tfr_module_, /*materialize_derived_attrs=*/true));
func_pm.addPass(mlir::CreateFunctionalToExecutorDialectConversionPass());
pm_.addPass(mlir::CreateBreakUpIslandsPass());
}
void TFRDecomposeContext::Destroy() { tfr_module_.erase(); }
absl::StatusOr<FunctionDef> ExpandNode(const NodeDef& node_def,
StringPiece func_name) {
mlir::MLIRContext mlir_ctx;
TF_ASSIGN_OR_RETURN(auto ctx, TFRDecomposeContext::Get(&mlir_ctx));
return ctx->ExpandNode(node_def, func_name);
}
Status DecomposeGraph(mlir::ModuleOp user_module) {
mlir::MLIRContext* mlir_ctx = user_module.getContext();
TF_ASSIGN_OR_RETURN(auto ctx, TFRDecomposeContext::Get(mlir_ctx));
return ctx->DecomposeGraph(user_module);
}
}
} | #include "tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx.h"
#include <string>
#include <vector>
#include "absl/types/span.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/test.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
using testing::ElementsAreArray;
using testing::Test;
using NodeAndType = std::pair<std::string, tensorflow::DataType>;
namespace tensorflow {
namespace {
REGISTER_OP("MyAddN")
.Input("inputs: N * T")
.Output("sum: T")
.Attr("N: int >= 1")
.Attr("T: {numbertype, variant}")
.SetIsCommutative()
.SetIsAggregate()
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("RiscAddDummy")
.Input("x: T")
.Input("y: T")
.Output("z: T")
.Attr(
"T: {bfloat16, half, float, double, uint8, int8, int16, int32, int64, "
"complex64, complex128, string}")
.SetShapeFn(shape_inference::UnchangedShape);
constexpr char tfr_raw_text[] = R"(
tfr.func @tf__my_add_n(%values: !tfr.tensor_list,
%n: i64 {tfr.name="N"}) -> !tfr.tensor {
%index = arith.constant 0 : index
%cst = arith.constant 1 : i64
%eq = arith.cmpi "eq", %n, %cst : i64
%v1 = tfr.get_element %values[%index] : (!tfr.tensor_list, index) -> !tfr.tensor
%res = scf.if %eq -> !tfr.tensor {
scf.yield %v1 : !tfr.tensor
} else {
%step = arith.index_cast %cst : i64 to index
%end = arith.index_cast %n : i64 to index
%reduce = scf.for %i = %step to %end step %step iter_args(%reduce_iter=%v1) -> !tfr.tensor {
%v = tfr.get_element %values[%i] : (!tfr.tensor_list, index) -> !tfr.tensor
%reduce_next = tfr.call @tf__risc_add_dummy(%reduce_iter, %v) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor
scf.yield %reduce_next : !tfr.tensor
}
scf.yield %reduce : !tfr.tensor
}
tfr.return %res : !tfr.tensor
}
tfr.func @tf__my_add_n_(!tfr.tensor_list<N,T>, i64 {tfr.name="N"}) -> !tfr.tensor attributes{N,T}
tfr.func @tf__risc_add_dummy_(!tfr.tensor<T>, !tfr.tensor<T>) -> !tfr.tensor<T> attributes{T}
)";
class TFRDecomposeContextTest : public Test {
protected:
void SetUp() override {
test_ctx_ = tfr::TFRDecomposeContext::GetFromText(tfr_raw_text, &ctx_);
}
void TearDown() override { test_ctx_->Destroy(); }
mlir::MLIRContext ctx_;
std::unique_ptr<tfr::TFRDecomposeContext> test_ctx_;
};
std::vector<NodeAndType> NodesSequenceOf(const FunctionDef& graph) {
std::vector<NodeAndType> nodes;
for (auto& node : graph.node_def()) {
nodes.push_back({node.op(), node.attr().at("T").type()});
}
return nodes;
}
TEST_F(TFRDecomposeContextTest, FLOAT_1_ins) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.emplace_back("input", 0, DT_FLOAT);
NodeDef test_node;
auto status = NodeDefBuilder("float_add", "MyAddN")
.Input(src_list)
.Finalize(&test_node);
EXPECT_TRUE(status.ok());
auto decomposed = test_ctx_->ExpandNode(test_node, "test");
EXPECT_TRUE(decomposed.ok());
std::vector<NodeAndType> expected_results{{"Identity", DT_FLOAT}};
EXPECT_THAT(NodesSequenceOf(decomposed.value()),
ElementsAreArray(expected_results));
}
TEST_F(TFRDecomposeContextTest, FLOAT_3_ins) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.emplace_back("in0", 0, DT_FLOAT);
src_list.emplace_back("in1", 0, DT_FLOAT);
src_list.emplace_back("in2", 0, DT_FLOAT);
NodeDef test_node;
auto status = NodeDefBuilder("float_add_3", "MyAddN")
.Input(src_list)
.Finalize(&test_node);
EXPECT_TRUE(status.ok());
auto decomposed = test_ctx_->ExpandNode(test_node, "test");
EXPECT_TRUE(decomposed.ok());
std::vector<NodeAndType> expected_results{{"RiscAddDummy", DT_FLOAT},
{"RiscAddDummy", DT_FLOAT}};
EXPECT_THAT(NodesSequenceOf(decomposed.value()),
ElementsAreArray(expected_results));
}
TEST_F(TFRDecomposeContextTest, INT32_3_ins) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.emplace_back("in0", 0, DT_INT32);
src_list.emplace_back("in1", 0, DT_INT32);
src_list.emplace_back("in2", 0, DT_INT32);
NodeDef test_node;
auto status =
NodeDefBuilder("int_add", "MyAddN").Input(src_list).Finalize(&test_node);
EXPECT_TRUE(status.ok());
auto decomposed = test_ctx_->ExpandNode(test_node, "test");
EXPECT_TRUE(decomposed.ok());
std::vector<NodeAndType> expected_results{{"RiscAddDummy", DT_INT32},
{"RiscAddDummy", DT_INT32}};
EXPECT_THAT(NodesSequenceOf(decomposed.value()),
ElementsAreArray(expected_results));
}
}
} |
1,226 | cpp | tensorflow/tensorflow | dump_graph | tensorflow/core/util/dump_graph.cc | tensorflow/core/util/dump_graph_test.cc | #ifndef TENSORFLOW_CORE_UTIL_DUMP_GRAPH_H_
#define TENSORFLOW_CORE_UTIL_DUMP_GRAPH_H_
#include <functional>
#include <string>
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
string DumpGraphDefToFile(const string& name, GraphDef const& graph_def,
const string& dirname = "");
string DumpCostGraphDefToFile(const string& name, CostGraphDef const& graph_def,
const string& dirname = "");
string DumpGraphToFile(const string& name, Graph const& graph,
const FunctionLibraryDefinition* flib_def = nullptr,
const string& dirname = "");
string DumpFunctionDefToFile(const string& name, FunctionDef const& fdef,
const string& dirname = "");
string DumpProtoToFile(const string& name,
tensorflow::protobuf::Message const& proto,
const string& dirname = "");
void SetGraphDumper(
std::function<Status(const Graph& graph,
const FunctionLibraryDefinition* flib_def,
WritableFile*)>
dumper,
string suffix = ".pbtxt");
string DumpToFile(const string& name, const string& dirname,
const string& suffix, const string& type_name,
std::function<Status(WritableFile*)> dumper);
}
#endif
#include "tensorflow/core/util/dump_graph.h"
#include <functional>
#include <memory>
#include <unordered_map>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
namespace {
using strings::StrCat;
struct NameCounts {
mutex counts_mutex;
std::unordered_map<string, int> counts;
};
string MakeUniqueFilename(string name, const string& suffix = ".pbtxt") {
static NameCounts& instance = *new NameCounts;
for (int i = 0; i < name.size(); ++i) {
char ch = name[i];
if (ch == '/' || ch == '[' || ch == ']' || ch == '*' || ch == '?' ||
ch == '\\') {
name[i] = '_';
}
}
int count;
{
mutex_lock lock(instance.counts_mutex);
count = instance.counts[name]++;
}
string filename = name;
if (count > 0) {
absl::StrAppend(&filename, "_", count);
}
absl::StrAppend(&filename, suffix);
return filename;
}
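// Behavior sketch (assuming a fresh process, so every per-name counter starts
// at zero):
//
//   MakeUniqueFilename("graph"); // -> "graph.pbtxt"
//   MakeUniqueFilename("graph"); // -> "graph_1.pbtxt"
//   MakeUniqueFilename("a/b*c"); // -> "a_b_c.pbtxt" (unsafe chars replaced)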
struct GraphDumperConfig {
mutex mu;
struct Config {
bool IsSet() const { return dumper != nullptr; }
std::function<Status(const Graph& graph,
const FunctionLibraryDefinition* flib_def,
WritableFile*)>
dumper = nullptr;
string suffix = ".pbtxt";
} config TF_GUARDED_BY(mu);
bool IsSet() TF_LOCKS_EXCLUDED(mu) {
mutex_lock lock(mu);
return config.IsSet();
}
};
GraphDumperConfig& GetGraphDumperConfig() {
static GraphDumperConfig config;
return config;
}
class StderrWritableFile : public WritableFile {
public:
StderrWritableFile() = default;
Status Append(StringPiece data) override {
fprintf(stderr, "%.*s", static_cast<int>(data.size()), data.data());
return absl::OkStatus();
}
Status Close() override { return absl::OkStatus(); }
Status Flush() override {
fflush(stderr);
return absl::OkStatus();
}
Status Name(StringPiece* result) const override {
*result = "stderr";
return absl::OkStatus();
}
Status Sync() override { return absl::OkStatus(); }
Status Tell(int64_t* position) override {
return errors::Unimplemented("Stream not seekable");
}
};
Status CreateWritableFile(Env* env, const string& dirname, const string& name,
const string& suffix, string* filepath,
std::unique_ptr<WritableFile>* file) {
string dir;
if (!dirname.empty()) {
dir = dirname;
} else {
const char* prefix = getenv("TF_DUMP_GRAPH_PREFIX");
if (prefix != nullptr) dir = prefix;
}
if (dir.empty()) {
LOG(WARNING)
<< "Failed to dump " << name << " because dump location is not "
<< "specified through either TF_DUMP_GRAPH_PREFIX environment "
<< "variable or function argument.";
return errors::InvalidArgument("TF_DUMP_GRAPH_PREFIX not specified");
}
if (absl::EqualsIgnoreCase(dir, "sponge") ||
absl::EqualsIgnoreCase(dir, "test_undeclared_outputs_dir")) {
if (!io::GetTestUndeclaredOutputsDir(&dir)) {
LOG(WARNING) << "TF_DUMP_GRAPH_PREFIX=sponge, but "
"TEST_UNDECLARED_OUTPUT_DIRS is not set, dumping to log";
dir = "-";
}
}
*filepath = "NULL";
if (dir == "-") {
*file = std::make_unique<StderrWritableFile>();
*filepath = "(stderr)";
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(dir));
*filepath = io::JoinPath(dir, MakeUniqueFilename(name, suffix));
return env->NewWritableFile(*filepath, file);
}
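// Destination resolution sketch: an explicit `dirname` wins; otherwise
// TF_DUMP_GRAPH_PREFIX decides. "sponge" redirects to the Bazel
// undeclared-outputs directory and "-" streams to stderr. For example
// (the /tmp/dumps path is hypothetical):
//
//   setenv("TF_DUMP_GRAPH_PREFIX", "/tmp/dumps", 1);
//   DumpGraphDefToFile("graph", graph_def); // writes /tmp/dumps/graph.pbtxt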
Status WriteTextProtoToUniqueFile(const tensorflow::protobuf::Message& proto,
WritableFile* file) {
string s;
if (!::tensorflow::protobuf::TextFormat::PrintToString(proto, &s)) {
return errors::FailedPrecondition("Unable to convert proto to text.");
}
TF_RETURN_IF_ERROR(file->Append(s));
StringPiece name;
TF_RETURN_IF_ERROR(file->Name(&name));
VLOG(5) << name;
VLOG(5) << s;
return file->Close();
}
Status WriteTextProtoToUniqueFile(
const tensorflow::protobuf::MessageLite& proto, WritableFile* file) {
string s;
if (!SerializeToStringDeterministic(proto, &s)) {
return errors::Internal("Failed to serialize proto to string.");
}
StringPiece name;
TF_RETURN_IF_ERROR(file->Name(&name));
VLOG(5) << name;
VLOG(5) << s;
TF_RETURN_IF_ERROR(file->Append(s));
return file->Close();
}
}
string DumpToFile(const string& name, const string& dirname,
const string& suffix, const string& type_name,
std::function<Status(WritableFile*)> dumper) {
string filepath;
std::unique_ptr<WritableFile> file;
Status status = CreateWritableFile(Env::Default(), dirname, name, suffix,
&filepath, &file);
if (!status.ok()) {
return StrCat("(failed to create writable file: ", status.ToString(), ")");
}
status = dumper(file.get());
if (!status.ok()) {
return StrCat("(failed to dump ", type_name, " to '", filepath,
"': ", status.ToString(), ")");
}
LOG(INFO) << "Dumped " << type_name << " to " << filepath;
return filepath;
}
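// Usage sketch (the "notes" name and payload are hypothetical): DumpToFile
// takes an arbitrary dumper callback, so callers are not limited to text
// protos.
//
//   string path = DumpToFile("notes", "", ".txt", "string",
//                            [](WritableFile* f) { return f->Append("hi"); });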
void SetGraphDumper(
std::function<Status(const Graph& graph,
const FunctionLibraryDefinition* flib_def,
WritableFile*)>
dumper,
string suffix) {
GraphDumperConfig& dumper_config = GetGraphDumperConfig();
mutex_lock lock(dumper_config.mu);
dumper_config.config.dumper = dumper;
dumper_config.config.suffix = suffix;
}
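// Usage sketch (a minimal custom dumper, not the only valid one): route later
// DumpGraphToFile calls through a binary serializer instead of text protos.
//
//   SetGraphDumper(
//       [](const Graph& g, const FunctionLibraryDefinition* flib,
//          WritableFile* f) -> Status {
//         GraphDef gd;
//         g.ToGraphDef(&gd);
//         return f->Append(gd.SerializeAsString());
//       },
//       ".pb");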
string DumpGraphDefToFile(const string& name, GraphDef const& graph_def,
const string& dirname) {
return DumpToFile(name, dirname, ".pbtxt", "Graph", [&](WritableFile* file) {
return WriteTextProtoToUniqueFile(graph_def, file);
});
}
string DumpCostGraphDefToFile(const string& name, CostGraphDef const& graph_def,
const string& dirname) {
return DumpToFile(name, dirname, ".pbtxt", "Graph", [&](WritableFile* file) {
return WriteTextProtoToUniqueFile(graph_def, file);
});
}
string DumpGraphToFile(const string& name, Graph const& graph,
const FunctionLibraryDefinition* flib_def,
const string& dirname) {
auto& dumper_config = GetGraphDumperConfig();
if (dumper_config.IsSet()) {
GraphDumperConfig::Config config;
{
mutex_lock lock(dumper_config.mu);
config = dumper_config.config;
}
if (config.IsSet()) {
return DumpToFile(name, dirname, config.suffix, "Graph",
[&](WritableFile* file) {
return config.dumper(graph, flib_def, file);
});
}
}
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
if (flib_def) {
*graph_def.mutable_library() = flib_def->ToProto();
}
return DumpGraphDefToFile(name, graph_def, dirname);
}
string DumpFunctionDefToFile(const string& name, FunctionDef const& fdef,
const string& dirname) {
return DumpToFile(name, dirname, ".pbtxt", "FunctionDef",
[&](WritableFile* file) {
return WriteTextProtoToUniqueFile(fdef, file);
});
}
string DumpProtoToFile(const string& name,
tensorflow::protobuf::Message const& proto,
const string& dirname) {
return DumpToFile(name, dirname, ".pbtxt", proto.GetTypeName(),
[&](WritableFile* file) {
return WriteTextProtoToUniqueFile(proto, file);
});
}
} | #include "tensorflow/core/util/dump_graph.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(DumpGraph, DumpGraphToFileSuccess) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
setenv("TF_DUMP_GRAPH_PREFIX", testing::TmpDir().c_str(), 1);
string ret = DumpGraphToFile("graph", graph);
EXPECT_EQ(ret, io::JoinPath(testing::TmpDir(), "graph.pbtxt"));
ret = DumpGraphToFile("graph", graph);
EXPECT_EQ(ret, io::JoinPath(testing::TmpDir(), "graph_1.pbtxt"));
GraphDef gdef;
TF_ASSERT_OK(ReadTextProto(
Env::Default(), io::JoinPath(testing::TmpDir(), "graph.pbtxt"), &gdef));
string read, written;
gdef.AppendToString(&read);
graph.ToGraphDefDebug().AppendToString(&written);
EXPECT_EQ(read, written);
}
TEST(DumpGraph, DumpGraphToFileNoEnvPrefix) {
Graph graph(OpRegistry::Global());
unsetenv("TF_DUMP_GRAPH_PREFIX");
string ret = DumpGraphToFile("graph", graph);
EXPECT_TRUE(str_util::StrContains(ret, "TF_DUMP_GRAPH_PREFIX not specified"));
}
TEST(DumpGraph, DumpFunctionDefToFileSuccess) {
FunctionDef fdef;
setenv("TF_DUMP_GRAPH_PREFIX", testing::TmpDir().c_str(), 1);
string ret = DumpFunctionDefToFile("function", fdef);
EXPECT_EQ(ret, io::JoinPath(testing::TmpDir(), "function.pbtxt"));
}
TEST(DumpGraph, DumpProtoToFileSuccess) {
NodeDef ndef_in;
ndef_in.set_name("foo");
setenv("TF_DUMP_GRAPH_PREFIX", testing::TmpDir().c_str(), 1);
string expected_filepath = io::JoinPath(testing::TmpDir(), "node_def.pbtxt");
string actual_filepath = DumpProtoToFile("node_def", ndef_in);
EXPECT_EQ(expected_filepath, actual_filepath);
NodeDef ndef_out;
TF_ASSERT_OK(ReadTextProto(Env::Default(), expected_filepath, &ndef_out));
EXPECT_EQ(ndef_in.DebugString(), ndef_out.DebugString());
}
}
} |
1,227 | cpp | tensorflow/tensorflow | dump_mlir_util | tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc | tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_DUMP_MLIR_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_DUMP_MLIR_UTIL_H_
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
inline constexpr absl::string_view kCrashReproducerStdErr = "-";
inline constexpr absl::string_view kCrashReproducerCrashAnalysis =
"crash_analysis";
Status CreateFileForDumping(llvm::StringRef name,
std::unique_ptr<llvm::raw_ostream>* os,
std::string* filepath,
llvm::StringRef dirname = "");
std::string DumpMlirOpToFile(llvm::StringRef name, mlir::Operation* op,
llvm::StringRef dirname = "",
const mlir::PassManager* pass_manager = nullptr);
std::string GetDumpDirFromEnvVar();
std::string DumpRawStringToFile(llvm::StringRef name, llvm::StringRef content,
llvm::StringRef dirname = "");
void SetCrashReproducer(mlir::PassManager& pm, llvm::StringRef dir_path = "");
void applyTensorflowAndCLOptions(mlir::PassManager& pm,
llvm::StringRef dir_path = "");
void PrintPassPipeline(const mlir::PassManager& pass_manager,
mlir::Operation* op, llvm::raw_ostream& os);
}
#endif
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/core/platform/crash_analysis.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/path.h"
#include "tsl/lib/io/buffered_file.h"
using llvm::raw_ostream;
namespace tensorflow {
namespace {
struct NameCounts {
mutex counts_mutex;
llvm::StringMap<int64_t> counts;
};
std::string MakeUniqueFilename(string name) {
static NameCounts& instance = *new NameCounts;
for (int i = 0, e = name.size(); i < e; ++i) {
char ch = name[i];
if (ch == '/' || ch == '[' || ch == ']' || ch == '*' || ch == '?' ||
ch == '\\') {
name[i] = '_';
}
}
int count;
{
mutex_lock lock(instance.counts_mutex);
count = instance.counts[name]++;
}
std::string filename = name;
if (count > 0) {
filename = llvm::formatv("{0}_{1}", filename, count).str();
}
filename = llvm::Twine(filename).concat(".mlir").str();
return filename;
}
struct LogInfoRawStream : public llvm::raw_ostream {
LogInfoRawStream() { SetUnbuffered(); }
~LogInfoRawStream() override = default;
uint64_t current_pos() const override { return 0; }
void write_impl(const char* ptr, size_t size) override {
fprintf(stderr, "%.*s", static_cast<int>(size), ptr);
}
};
struct WritableFileRawStream : public llvm::raw_ostream {
explicit WritableFileRawStream(std::unique_ptr<WritableFile> file)
: file(std::move(file)) {
SetUnbuffered();
}
~WritableFileRawStream() override = default;
uint64_t current_pos() const override {
int64_t position;
if (file->Tell(&position).ok()) {
return position;
} else {
LOG(WARNING)
<< "Couldn't query file position. Stream might be malformed.\n";
return -1;
}
}
void write_impl(const char* ptr, size_t size) override {
if (file && !file->Append(StringPiece(ptr, size)).ok()) {
file = nullptr;
}
}
std::unique_ptr<WritableFile> file;
};
struct CrashReproducerStream : public mlir::ReproducerStream {
CrashReproducerStream(llvm::StringRef name,
std::unique_ptr<llvm::raw_ostream> file)
: name(name), ostream(std::move(file)) {}
llvm::StringRef description() override { return name; }
raw_ostream& os() override { return *ostream; }
private:
std::string name;
std::unique_ptr<llvm::raw_ostream> ostream;
};
struct CrashAnalysisCrashReproducerStream : public mlir::ReproducerStream {
public:
CrashAnalysisCrashReproducerStream()
: internal_str(""), string_stream(internal_str) {}
~CrashAnalysisCrashReproducerStream() override {
crash_analysis::ReportEvent(
"mlir_crash_reproducer.mlir",
"Pass pipeline failure; crash reproducer attached",
string_stream.str());
}
llvm::StringRef description() override { return "mlir_crash_reproducer"; }
raw_ostream& os() override { return string_stream; }
private:
std::string internal_str;
llvm::raw_string_ostream string_stream;
};
}
Status CreateFileForDumping(llvm::StringRef name,
std::unique_ptr<raw_ostream>* os,
std::string* filepath, llvm::StringRef dirname) {
std::string dir;
if (!dirname.empty())
dir = std::string(dirname);
else
dir = GetDumpDirFromEnvVar();
if (dir.empty()) {
return Status(absl::StatusCode::kInvalidArgument,
"(TF_DUMP_GRAPH_PREFIX not specified)");
}
if (dir == kCrashReproducerStdErr) {
*os = std::make_unique<LogInfoRawStream>();
*filepath =
llvm::formatv("(stderr; requested filename: '{0}')", name).str();
return Status();
}
Env* env = Env::Default();
Status status = env->RecursivelyCreateDir(dir);
if (!status.ok()) {
LOG(WARNING) << "Failed to create '" << dir
<< "' directory for dumping: " << status;
return Status(absl::StatusCode::kUnavailable, "(unavailable)");
}
*filepath = io::JoinPath(dir, MakeUniqueFilename(std::string(name)));
std::unique_ptr<WritableFile> file;
status = env->NewWritableFile(*filepath, &file);
if (!status.ok()) {
LOG(WARNING) << "Failed to create file '" << filepath << "': " << status;
return Status(absl::StatusCode::kUnavailable, "(unavailable)");
}
file = std::make_unique<tsl::BufferedWritableFile>(std::move(file));
*os = std::make_unique<WritableFileRawStream>(std::move(file));
return Status();
}
void PrintPassPipeline(const mlir::PassManager& pass_manager,
mlir::Operation* op, llvm::raw_ostream& os) {
std::string str;
llvm::raw_string_ostream passOS(str);
llvm::interleaveComma(
pass_manager.getPasses(), passOS,
[&](mlir::Pass& pass) { pass.printAsTextualPipeline(passOS); });
os << "{-# external_resources: { mlir_reproducer: { pipeline: "
"\"builtin.module("
<< passOS.str() << ")\", ";
os << "disable_threading: true, ";
os << "verify_each: true } } #-}";
os << "\n\n";
}
std::string DumpMlirOpToFile(llvm::StringRef name, mlir::Operation* op,
llvm::StringRef dirname,
const mlir::PassManager* pass_manager) {
std::unique_ptr<raw_ostream> os;
std::string filepath;
Status result = CreateFileForDumping(name, &os, &filepath, dirname);
if (!result.ok()) return std::string(result.message());
LOG(INFO) << "Dumping MLIR operation '" << op->getName().getStringRef().str()
<< "' to '" << filepath << "'";
if (pass_manager) PrintPassPipeline(*pass_manager, op, *os);
op->print(*os, mlir::OpPrintingFlags().useLocalScope());
return filepath;
}
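// Usage sketch (`module` stands for any mlir::ModuleOp): dump an op, with the
// pass pipeline that produced it recorded as a reproducer header.
//
//   mlir::PassManager pm(module.getContext());
//   std::string path = DumpMlirOpToFile("after_pass", module, "", &pm);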
std::string GetDumpDirFromEnvVar() {
const char* prefix_env = getenv("TF_DUMP_GRAPH_PREFIX");
if (!prefix_env) {
LOG(WARNING)
<< "Failed to dump MLIR module because dump location is not "
<< "specified through TF_DUMP_GRAPH_PREFIX environment variable.";
return "";
}
std::string result = prefix_env;
if (absl::EqualsIgnoreCase(result, "sponge") &&
!io::GetTestUndeclaredOutputsDir(&result)) {
LOG(WARNING) << "TF_DUMP_GRAPH_PREFIX=sponge but "
"TEST_UNDECLARED_OUTPUT_DIRS is not set";
return "";
}
return result;
}
std::string DumpRawStringToFile(llvm::StringRef name, llvm::StringRef content,
llvm::StringRef dirname) {
std::unique_ptr<raw_ostream> os;
std::string filepath;
Status result = CreateFileForDumping(name, &os, &filepath, dirname);
if (!result.ok()) return std::string(result.message());
(*os) << content;
LOG(INFO) << "Outputted requested string to '" << filepath << "'";
return filepath;
}
void SetCrashReproducer(mlir::PassManager& pm, llvm::StringRef dir_path) {
std::string path = dir_path.str();
if (path.empty() || path == kCrashReproducerCrashAnalysis) {
if (getenv("MLIR_CRASH_REPRODUCER_DIRECTORY"))
path = getenv("MLIR_CRASH_REPRODUCER_DIRECTORY");
else if (getenv("TEST_UNDECLARED_OUTPUTS_DIR"))
path = "sponge";
}
if (path.empty()) {
LOG_FIRST_N(INFO, 1) << "disabling MLIR crash reproducer, set env var "
"`MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.";
return;
}
string lower_path = absl::AsciiStrToLower(path);
if (lower_path == "sponge") {
if (!tensorflow::io::GetTestUndeclaredOutputsDir(&path)) {
LOG(ERROR) << "MLIR crash reproducer is set to '" << dir_path.str()
<< "', but environment variable TEST_UNDECLARED_OUTPUTS_DIR "
"is not set, so cannot dump anywhere.";
return;
}
}
if (path != kCrashReproducerStdErr && path != kCrashReproducerCrashAnalysis) {
auto* env = tensorflow::Env::Default();
auto status = env->RecursivelyCreateDir(path);
if (!status.ok()) {
LOG(WARNING) << "cannot create directory '" << path
<< "': " << status.message();
return;
}
path += "/mlir_reproducer_";
if (!tensorflow::Env::Default()->CreateUniqueFileName(&path, ".mlir")) {
LOG(WARNING) << "cannot create unique filename, won't enable MLIR crash "
"reproducer.";
return;
}
}
mlir::ReproducerStreamFactory factory =
[path](std::string& error) -> std::unique_ptr<mlir::ReproducerStream> {
if (path == kCrashReproducerStdErr)
return std::make_unique<CrashReproducerStream>(
"(stderr)", std::make_unique<LogInfoRawStream>());
if (path == kCrashReproducerCrashAnalysis) {
return std::make_unique<CrashAnalysisCrashReproducerStream>();
}
std::unique_ptr<WritableFile> file;
Status status = tensorflow::Env::Default()->NewWritableFile(path, &file);
if (!status.ok()) {
error = absl::StrCat("Failed to create file '", path,
"': ", status.message());
return nullptr;
}
file = std::make_unique<tsl::BufferedWritableFile>(std::move(file));
return std::make_unique<CrashReproducerStream>(
path, std::make_unique<WritableFileRawStream>(std::move(file)));
};
pm.enableCrashReproducerGeneration(factory, false);
}
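// Resolution sketch: an empty `dir_path` falls back to
// MLIR_CRASH_REPRODUCER_DIRECTORY, then to "sponge" when
// TEST_UNDECLARED_OUTPUTS_DIR is set; if no directory is found the
// reproducer stays disabled. Example (hypothetical directory):
//
//   mlir::PassManager pm(&context);
//   SetCrashReproducer(pm, "/tmp/reproducers");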
void applyTensorflowAndCLOptions(mlir::PassManager& pm,
llvm::StringRef dir_path) {
mlir::registerPassManagerCLOptions();
if (!mlir::succeeded(mlir::applyPassManagerCLOptions(pm))) {
LOG(ERROR) << "cannot apply MLIR pass manager CL options";
return;
}
SetCrashReproducer(pm, dir_path);
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/InitAllDialects.h"
#include "mlir/InitAllPasses.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Tools/mlir-opt/MlirOptMain.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/bridge.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::testing::IsNull;
TEST(DumpMlirModuleTest, NoEnvPrefix) {
mlir::MLIRContext context;
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
unsetenv("TF_DUMP_GRAPH_PREFIX");
std::string filepath = DumpMlirOpToFile("module", module_ref.get());
EXPECT_EQ(filepath, "(TF_DUMP_GRAPH_PREFIX not specified)");
}
TEST(DumpMlirModuleTest, LogInfo) {
mlir::MLIRContext context;
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
setenv("TF_DUMP_GRAPH_PREFIX", "-", 1);
std::string filepath = DumpMlirOpToFile("module", module_ref.get());
EXPECT_EQ(filepath, "(stderr; requested filename: 'module')");
}
TEST(DumpMlirModuleTest, Valid) {
mlir::MLIRContext context;
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
setenv("TF_DUMP_GRAPH_PREFIX", testing::TmpDir().c_str(), 1);
std::string expected_txt_module;
{
llvm::raw_string_ostream os(expected_txt_module);
module_ref->getOperation()->print(os,
mlir::OpPrintingFlags().useLocalScope());
os.flush();
}
std::string filepath = DumpMlirOpToFile("module", module_ref.get());
ASSERT_NE(filepath, "(TF_DUMP_GRAPH_PREFIX not specified)");
ASSERT_NE(filepath, "LOG(INFO)");
ASSERT_NE(filepath, "(unavailable)");
Env* env = Env::Default();
std::string file_txt_module;
TF_ASSERT_OK(ReadFileToString(env, filepath, &file_txt_module));
EXPECT_EQ(file_txt_module, expected_txt_module);
}
TEST(DumpCrashReproducerTest, RoundtripDumpAndReadValid) {
mlir::registerPassManagerCLOptions();
mlir::MLIRContext context;
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
setenv("TF_DUMP_GRAPH_PREFIX", testing::TmpDir().c_str(), 1);
std::string filepath =
testing::TmpDir() + "/" + mlir::TF::kStandardPipelineBefore + ".mlir";
std::string output_dump = testing::TmpDir() + "/" + "output_dump.txt";
TF_ASSERT_OK(mlir::TF::RunBridgeWithStandardPipeline(
module_ref.get(),
true, false));
std::string errorMessage;
auto input_file = mlir::openInputFile(filepath, &errorMessage);
EXPECT_THAT(input_file, Not(IsNull()));
auto output_stream = mlir::openOutputFile(output_dump, &errorMessage);
EXPECT_THAT(output_stream, Not(IsNull()));
mlir::PassPipelineCLParser passPipeline(
"", "Compiler passes to run", "p");
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::registerAllPasses();
mlir::registerTensorFlowPasses();
EXPECT_TRUE(mlir::MlirOptMain(output_stream->os(), std::move(input_file),
registry,
mlir::MlirOptMainConfig{}
.splitInputFile("")
.verifyDiagnostics(false)
.verifyPasses(false)
.allowUnregisteredDialects(false)
.setPassPipelineParser(passPipeline))
.succeeded());
}
TEST(DumpRawStringToFileTest, Valid) {
llvm::StringRef example = "module {\n}";
setenv("TF_DUMP_GRAPH_PREFIX", testing::TmpDir().c_str(), 1);
std::string filepath = DumpRawStringToFile("example", example);
ASSERT_NE(filepath, "(TF_DUMP_GRAPH_PREFIX not specified)");
ASSERT_NE(filepath, "LOG(INFO)");
ASSERT_NE(filepath, "(unavailable)");
Env* env = Env::Default();
std::string file_txt_module;
TF_ASSERT_OK(ReadFileToString(env, filepath, &file_txt_module));
EXPECT_EQ(file_txt_module, example);
}
}
} |
1,228 | cpp | tensorflow/tensorflow | xla_sharding_util | tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc | tensorflow/compiler/mlir/tensorflow/tests/xla_sharding_util_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_XLA_SHARDING_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_XLA_SHARDING_UTIL_H_
#include <map>
#include <string>
#include "absl/status/statusor.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
namespace tensorflow {
inline constexpr llvm::StringRef kInputShardingAttr =
"input_sharding_configuration";
inline constexpr llvm::StringRef kOutputShardingAttr =
"output_sharding_configuration";
mlir::LogicalResult DecodeShardingAttribute(const std::string& shard_str,
xla::OpSharding& sharding,
bool report_error = true);
mlir::LogicalResult DecodeShardingAttribute(mlir::Attribute shard_attr,
xla::OpSharding& sharding,
bool report_error = true);
void EncodeSharding(mlir::Operation* op, llvm::StringRef shard_str);
mlir::LogicalResult ExtractInputsForLogicalDevices(
int num_cores_per_replica, mlir::tf_device::ClusterFuncOp cluster_func,
mlir::OpBuilder* builder,
llvm::SmallVectorImpl<llvm::SmallVector<mlir::Value, 4>>* input_list);
mlir::LogicalResult ParseAndValidateOutputSharding(
int num_cores_per_replica, mlir::tf_device::ClusterFuncOp cluster_func,
mlir::SmallVector<xla::OpSharding, 4>* output_sharding_list);
mlir::LogicalResult GetOutputTypesForLogicalDeviceComputation(
int core_id, llvm::ArrayRef<xla::OpSharding> output_sharding_config,
mlir::tf_device::ClusterFuncOp cluster_func,
llvm::SmallVectorImpl<mlir::Type>* output_types,
llvm::SmallVectorImpl<int>* cluster_to_core_index);
mlir::LogicalResult RemapOutputsFromLogicalDevices(
const mlir::Location& location,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
int num_results_pre_cluster,
mlir::tf_device::ParallelExecuteOp old_parallel_execute, int cluster_idx,
mlir::tf_device::ParallelExecuteOp new_parallel_execute,
mlir::OpBuilder* builder);
llvm::SmallVector<llvm::SmallVector<int64_t, 4>, 4> GetMetadataArgumentMapping(
const tpu::TPUCompileMetadataProto& metadata);
int GetDimsFromXLAShardingTiled(const xla::OpSharding& xla_sharding);
bool IsOtherReplicatedSharding(const xla::OpSharding& xla_sharding);
bool IsSplitSharding(const xla::OpSharding& sharding);
bool IsReplicatedSharding(const xla::OpSharding& sharding);
absl::StatusOr<std::map<int, int>> GetDimensionIndicesAndNumSplitsFromSharding(
const xla::OpSharding& sharding);
}
#endif
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include <cstdint>
#include <map>
#include <numeric>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "xla/client/sharding_builder.h"
#include "xla/service/hlo_parser.h"
#include "xla/xla_data.pb.h"
namespace tensorflow {
namespace {
constexpr char kNumSplitAttr[] = "num_split";
mlir::LogicalResult CreateSplitOp(const int num_split,
const int split_dimension,
const mlir::Location& location,
mlir::Value src_input,
mlir::OpBuilder* builder,
mlir::TF::SplitOp* split_op) {
auto split_dim_type =
mlir::RankedTensorType::get({}, builder->getIntegerType(32));
auto split_dimension_attr =
mlir::DenseElementsAttr::get(split_dim_type, split_dimension);
auto split_dimension_op = builder->create<mlir::TF::ConstOp>(
location, split_dim_type, split_dimension_attr);
mlir::Type output_type;
auto input_type = mlir::cast<mlir::TensorType>(src_input.getType());
if (input_type.hasRank()) {
if (input_type.getShape()[split_dimension] == mlir::ShapedType::kDynamic) {
output_type = input_type;
} else {
auto shape = llvm::to_vector<4>(input_type.getShape());
if (shape[split_dimension] % num_split != 0) {
return mlir::emitError(
location,
llvm::formatv(
"incorrect input sharding configuration received. "
"{0}-th dimension of the input must be evenly divisible by {1}",
split_dimension, num_split));
}
shape[split_dimension] = shape[split_dimension] / num_split;
output_type =
mlir::RankedTensorType::get(shape, input_type.getElementType());
}
} else {
output_type = input_type;
}
llvm::SmallVector<mlir::Type, 4> output_types(num_split, output_type);
*split_op = builder->create<mlir::TF::SplitOp>(
location, output_types, split_dimension_op.getOutput(), src_input);
(*split_op)->setAttr(
kNumSplitAttr,
builder->getIntegerAttr(builder->getIntegerType(32), num_split));
return mlir::success();
}
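// Shape sketch: num_split = 2 along dimension 0 turns
//   tensor<8x16xf32> -> 2 x tensor<4x16xf32>
// A split dimension that is not evenly divisible by num_split is rejected
// with the error emitted above.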
mlir::TF::ConcatOp CreateConcatOp(const int concat_dimension,
const mlir::Location& location,
mlir::ArrayRef<mlir::Value> inputs,
mlir::OpBuilder* builder) {
auto concat_dim_type =
mlir::RankedTensorType::get({}, builder->getIntegerType(32));
auto concat_dimension_attr =
mlir::DenseElementsAttr::get(concat_dim_type, concat_dimension);
auto concat_dimension_op = builder->create<mlir::TF::ConstOp>(
location, concat_dim_type, concat_dimension_attr);
mlir::Type output_type;
auto input_type = mlir::cast<mlir::TensorType>(inputs[0].getType());
if (input_type.hasRank()) {
if (input_type.getShape()[concat_dimension] == mlir::ShapedType::kDynamic) {
output_type = input_type;
} else {
auto shape = llvm::to_vector<4>(input_type.getShape());
shape[concat_dimension] = shape[concat_dimension] * inputs.size();
output_type =
mlir::RankedTensorType::get(shape, input_type.getElementType());
}
} else {
output_type = input_type;
}
return builder->create<mlir::TF::ConcatOp>(
location, output_type, concat_dimension_op.getOutput(), inputs);
}
mlir::LogicalResult HandleTileShardedInputs(
const mlir::Location& location, const xla::OpSharding& input_sharding,
const mlir::Value& original_source, mlir::OpBuilder* builder,
llvm::SmallVectorImpl<mlir::Value>* tiled_inputs) {
llvm::SmallVector<mlir::TF::SplitOp, 4> split_ops_for_tiled_input;
split_ops_for_tiled_input.reserve(
input_sharding.tile_assignment_devices_size());
auto dimension_to_splits_map =
GetDimensionIndicesAndNumSplitsFromSharding(input_sharding);
if (!dimension_to_splits_map.ok()) {
LOG(ERROR) << dimension_to_splits_map.status();
return mlir::failure();
}
for (const auto& dimension_and_num_splits : *dimension_to_splits_map) {
const int dimension = dimension_and_num_splits.first;
const int num_splits = dimension_and_num_splits.second;
if (split_ops_for_tiled_input.empty()) {
mlir::TF::SplitOp root_split_op;
auto result = CreateSplitOp(num_splits, dimension, location,
original_source, builder, &root_split_op);
if (mlir::failed(result)) return mlir::failure();
split_ops_for_tiled_input.emplace_back(root_split_op);
continue;
}
llvm::SmallVector<mlir::TF::SplitOp, 4> new_split_ops;
new_split_ops.reserve(split_ops_for_tiled_input.size() * num_splits);
for (auto split_op : split_ops_for_tiled_input) {
for (auto parent_split_output_value : split_op.getResults()) {
mlir::TF::SplitOp child_split_op;
auto result =
CreateSplitOp(num_splits, dimension, location,
parent_split_output_value, builder, &child_split_op);
if (mlir::failed(result)) return mlir::failure();
new_split_ops.emplace_back(child_split_op);
}
}
std::swap(new_split_ops, split_ops_for_tiled_input);
}
tiled_inputs->clear();
tiled_inputs->reserve(input_sharding.tile_assignment_devices_size());
for (auto split_op : split_ops_for_tiled_input) {
for (auto split_op_output : split_op.getResults()) {
int64_t repeat_count =
input_sharding.replicate_on_last_tile_dim()
? *input_sharding.tile_assignment_dimensions().rbegin()
: 1;
for (int64_t i = 0; i < repeat_count; ++i) {
tiled_inputs->push_back(split_op_output);
}
}
}
return mlir::success();
}
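// Tiling sketch: for tile_assignment_dimensions [2, 2] the first pass splits
// the input in two along dimension 0 and the second pass splits each half
// along dimension 1, yielding four tiles in row-major tile order. With
// replicate_on_last_tile_dim = true, each tile is pushed once per entry of
// the trailing (replication) dimension.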
bool UnsupportedPartitionedShardingType(xla::OpSharding::Type sharding) {
return sharding != xla::OpSharding::REPLICATED &&
sharding != xla::OpSharding::OTHER;
}
}
absl::StatusOr<std::map<int, int>> GetDimensionIndicesAndNumSplitsFromSharding(
const xla::OpSharding& sharding) {
int64_t tensor_tile_rank = sharding.tile_assignment_dimensions_size();
if (sharding.replicate_on_last_tile_dim()) {
tensor_tile_rank--;
}
std::map<int, int> dimension_to_splits_map;
for (int dim_index = 0; dim_index < tensor_tile_rank; ++dim_index) {
if (sharding.tile_assignment_dimensions(dim_index) > 1) {
dimension_to_splits_map.emplace(
dim_index, sharding.tile_assignment_dimensions(dim_index));
}
}
if (dimension_to_splits_map.empty()) {
return absl::InvalidArgumentError(absl::StrCat(
"Arg has unnecessary tiled sharding: ", sharding.DebugString()));
}
return dimension_to_splits_map;
}
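// Worked example: tile_assignment_dimensions [2, 1, 4] with
// replicate_on_last_tile_dim = false yields {0: 2, 2: 4}; dimension 1 is
// omitted because it is not split. With replicate_on_last_tile_dim = true the
// trailing dimension counts as replication, so the same input yields {0: 2}.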
int GetDimsFromXLAShardingTiled(const xla::OpSharding& xla_sharding) {
return xla_sharding.tile_assignment_dimensions_size() -
(xla_sharding.replicate_on_last_tile_dim() ? 1 : 0) -
xla_sharding.last_tile_dims_size();
}
bool IsOtherReplicatedSharding(const xla::OpSharding& xla_sharding) {
int max_dim = GetDimsFromXLAShardingTiled(xla_sharding);
for (int i = 0; i < max_dim; ++i) {
if (xla_sharding.tile_assignment_dimensions(i) != 1) {
return false;
}
}
return xla_sharding.type() == xla::OpSharding::OTHER &&
(xla_sharding.replicate_on_last_tile_dim() ||
!xla_sharding.last_tile_dims().empty());
}
bool IsSplitSharding(const xla::OpSharding& sharding) {
return sharding.type() == xla::OpSharding::OTHER &&
!IsOtherReplicatedSharding(sharding);
}
bool IsReplicatedSharding(const xla::OpSharding& sharding) {
return sharding.type() == xla::OpSharding::REPLICATED ||
IsOtherReplicatedSharding(sharding);
}
mlir::LogicalResult DecodeShardingAttribute(const std::string& shard_str,
xla::OpSharding& sharding,
bool report_error) {
if (sharding.ParseFromString(shard_str)) return mlir::success();
absl::StatusOr<xla::HloSharding> sharding_hlo = xla::ParseSharding(shard_str);
if (sharding_hlo.ok()) {
sharding = sharding_hlo->ToProto();
return mlir::success();
}
if (report_error)
llvm::errs() << std::string(sharding_hlo.status().message()) << "\n";
return mlir::failure();
}
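// Usage sketch: both the serialized xla::OpSharding proto form and the
// human-readable HLO sharding string are accepted.
//
//   xla::OpSharding sharding;
//   if (DecodeShardingAttribute("{maximal device=0}", sharding).succeeded()) {
//     // sharding.type() == xla::OpSharding::MAXIMAL
//   }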
mlir::LogicalResult DecodeShardingAttribute(mlir::Attribute shard_attr,
xla::OpSharding& sharding,
bool report_error) {
if (!mlir::isa<mlir::StringAttr>(shard_attr)) return mlir::failure();
auto shard_str = mlir::cast<mlir::StringAttr>(shard_attr).getValue().str();
return DecodeShardingAttribute(shard_str, sharding, report_error);
}
void EncodeSharding(mlir::Operation* op, llvm::StringRef shard_str) {
if (!op->hasAttrOfType<mlir::StringAttr>(shard_str)) return;
::xla::OpSharding sharding;
auto sharding_proto_str =
op->getAttrOfType<mlir::StringAttr>(shard_str).getValue().str();
if (!sharding.ParseFromString(sharding_proto_str)) return;
auto hlosharding = xla::HloSharding::FromProto(sharding);
if (!hlosharding.ok()) {
op->emitError("Unable to encode sharding to human readable ")
<< hlosharding.status().message();
return;
}
op->setAttr(shard_str,
mlir::StringAttr::get(op->getContext(), hlosharding->ToString()));
}
mlir::LogicalResult ExtractInputsForLogicalDevices(
const int num_cores_per_replica,
mlir::tf_device::ClusterFuncOp cluster_func, mlir::OpBuilder* builder,
llvm::SmallVectorImpl<llvm::SmallVector<mlir::Value, 4>>* input_list) {
input_list->reserve(num_cores_per_replica);
for (int i = 0; i < num_cores_per_replica; ++i)
input_list->emplace_back(llvm::SmallVector<mlir::Value, 4>());
llvm::SmallVector<mlir::Value, 4> cluster_func_inputs(
cluster_func.getOperands());
auto sharding_attrs =
cluster_func.getOperation()->getAttrOfType<mlir::ArrayAttr>(
kInputShardingAttr);
if (!sharding_attrs) {
(*input_list)[0] = cluster_func_inputs;
return mlir::success();
}
for (const auto& sharding_attr_and_index : llvm::enumerate(sharding_attrs)) {
const auto& sharding_attr = sharding_attr_and_index.value();
const auto input_index = sharding_attr_and_index.index();
const auto& input_value = cluster_func_inputs[input_index];
xla::OpSharding sharding;
if (DecodeShardingAttribute(
mlir::cast<mlir::StringAttr>(sharding_attr).getValue().str(),
sharding)
.failed()) {
return cluster_func.emitError("incorrect sharding format for inputs");
}
const auto input_sharding_type = sharding.type();
auto tiled_sharding_mismatched = [&](int tiled_input_size) {
return cluster_func.emitError(
llvm::formatv("incorrect {0}-th tiled input sharding received. "
"Product of tile sharding splits({1}) must be equal to "
"number of logical devices : {2}",
input_index, tiled_input_size, num_cores_per_replica));
};
if (auto partitioned_input =
llvm::dyn_cast_or_null<mlir::TF::TPUPartitionedInputV2Op>(
input_value.getDefiningOp())) {
if (UnsupportedPartitionedShardingType(input_sharding_type))
return cluster_func->emitOpError()
<< "unsupported input sharding type "
<< OpSharding_Type_Name(input_sharding_type) << " for "
<< input_index << "-th input";
if (input_sharding_type == xla::OpSharding::REPLICATED) {
for (const auto& index_and_inputs : llvm::enumerate(*input_list)) {
index_and_inputs.value().emplace_back(
partitioned_input.getOperand(index_and_inputs.index()));
}
} else {
assert(input_sharding_type == xla::OpSharding::OTHER);
if (partitioned_input.getInputs().size() != num_cores_per_replica)
return tiled_sharding_mismatched(
partitioned_input.getInputs().size());
for (int i = 0; i < sharding.tile_assignment_devices_size(); ++i) {
const int assigned_logical_device =
sharding.tile_assignment_devices(i);
(*input_list)[assigned_logical_device].emplace_back(
partitioned_input.getInputs()[i]);
}
}
continue;
}
if (IsSplitSharding(sharding)) {
llvm::SmallVector<mlir::Value, 4> tiled_inputs;
auto result = HandleTileShardedInputs(
cluster_func.getLoc(), sharding, input_value, builder, &tiled_inputs);
if (mlir::failed(result)) return mlir::failure();
const int64_t tiled_inputs_size = tiled_inputs.size();
if (tiled_inputs_size != num_cores_per_replica)
return tiled_sharding_mismatched(tiled_inputs.size());
for (int i = 0; i < sharding.tile_assignment_devices_size(); ++i) {
const int assigned_logical_device = sharding.tile_assignment_devices(i);
(*input_list)[assigned_logical_device].emplace_back(tiled_inputs[i]);
}
} else if (IsReplicatedSharding(sharding)) {
for (auto& inputs : *input_list) inputs.emplace_back(input_value);
} else {
assert(input_sharding_type == xla::OpSharding::MAXIMAL);
const int logical_device_id = sharding.tile_assignment_devices(0);
(*input_list)[logical_device_id].emplace_back(input_value);
}
}
return mlir::success();
}
mlir::LogicalResult ParseAndValidateOutputSharding(
const int num_cores_per_replica,
mlir::tf_device::ClusterFuncOp cluster_func,
mlir::SmallVector<xla::OpSharding, 4>* output_sharding_list) {
output_sharding_list->reserve(cluster_func.getNumResults());
const auto output_sharding_attrs =
cluster_func.getOperation()->getAttrOfType<mlir::ArrayAttr>(
kOutputShardingAttr);
if (!output_sharding_attrs)
return cluster_func.emitError(
"output_sharding_configuration missing from cluster func");
if (output_sharding_attrs.size() != cluster_func.getNumResults())
return cluster_func.emitError("incorrect number of output sharding");
for (const auto& output_sharding_and_index :
llvm::enumerate(output_sharding_attrs)) {
const auto& output_sharding = output_sharding_and_index.value();
const int sharding_index = output_sharding_and_index.index();
if (!mlir::isa<mlir::StringAttr>(output_sharding))
return cluster_func.emitError(llvm::formatv(
"non-string output sharding at index {0}", sharding_index));
xla::OpSharding sharding;
if (DecodeShardingAttribute(
mlir::cast<mlir::StringAttr>(output_sharding).getValue().str(),
sharding)
.failed()) {
return cluster_func.emitError("incorrect sharding format for outputs");
}
if (sharding.type() == xla::OpSharding::OTHER &&
sharding.tile_assignment_devices_size() != num_cores_per_replica)
return cluster_func.emitError(llvm::formatv(
"incorrect sharding format for outputs. Number of "
"tiled outputs({0}) must match the number of logical "
"devices({1})",
sharding.tile_assignment_devices_size(), num_cores_per_replica));
if (sharding.type() == xla::OpSharding::MAXIMAL &&
((sharding.tile_assignment_devices(0) >= num_cores_per_replica) ||
(sharding.tile_assignment_devices(0) < 0)))
return cluster_func.emitError(llvm::formatv(
"incorrect sharding format for outputs. Maximal "
"sharding should be assigned to device id in range "
"[0, {0}). Currently assigned to {1}",
num_cores_per_replica, sharding.tile_assignment_devices(0)));
output_sharding_list->emplace_back(std::move(sharding));
}
return mlir::success();
}
namespace {
bool IsAssignedToLogicalDevice(const int core_id,
const xla::OpSharding& sharding) {
return sharding.type() == xla::OpSharding::MAXIMAL &&
sharding.tile_assignment_devices(0) == core_id;
}
mlir::LogicalResult LookupClusterToCoreIndex(
const mlir::Location& location,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
const int core_id, const int cluster_func_output_index,
int* core_output_index) {
*core_output_index =
cluster_to_core_index[core_id][cluster_func_output_index];
if (*core_output_index == -1) {
mlir::emitError(
location,
llvm::formatv("Attempted to map cluster_func output index {0} to "
"program assigned to core {1}. The tensor at this output "
"index was not assigned or sharded to this core.",
cluster_func_output_index, core_id));
return mlir::failure();
}
return mlir::success();
}
mlir::LogicalResult GetTileShardedOutputsToMerge(
const mlir::Location& location, const int cluster_func_output_index,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
int cluster_idx, mlir::tf_device::ParallelExecuteOp new_parallel_execute,
llvm::SmallVector<mlir::Value, 4>* outputs_to_merge) {
const xla::OpSharding& sharding =
output_sharding_config[cluster_func_output_index];
outputs_to_merge->reserve(sharding.tile_assignment_devices_size());
for (const auto& core_id_and_index :
llvm::enumerate(sharding.tile_assignment_devices())) {
auto core_id = core_id_and_index.value();
auto tile_index = core_id_and_index.index();
int last_tile_dim_size = *sharding.tile_assignment_dimensions().rbegin();
if (sharding.replicate_on_last_tile_dim() &&
tile_index % last_tile_dim_size != 0) {
continue;
}
int region_output_index;
auto status = LookupClusterToCoreIndex(location, cluster_to_core_index,
core_id, cluster_func_output_index,
®ion_output_index);
if (failed(status)) return mlir::failure();
const auto output_from_logical_device =
new_parallel_execute.GetRegionOutputs(cluster_idx +
core_id)[region_output_index];
outputs_to_merge->emplace_back(output_from_logical_device);
}
return mlir::success();
}
mlir::LogicalResult HandleTileShardedOutputs(
const int cluster_func_output_index,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
const mlir::Location& location, mlir::Value cluster_func_output,
int cluster_idx, mlir::tf_device::ParallelExecuteOp new_parallel_execute,
mlir::OpBuilder* builder) {
builder->setInsertionPointAfter(new_parallel_execute);
llvm::SmallVector<mlir::Value, 4> outputs_to_merge;
auto status = GetTileShardedOutputsToMerge(
location, cluster_func_output_index, output_sharding_config,
cluster_to_core_index, cluster_idx, new_parallel_execute,
&outputs_to_merge);
if (failed(status)) return mlir::failure();
const xla::OpSharding& sharding =
output_sharding_config[cluster_func_output_index];
auto dimension_to_splits_map =
GetDimensionIndicesAndNumSplitsFromSharding(sharding);
if (!dimension_to_splits_map.ok()) {
LOG(ERROR) << dimension_to_splits_map.status();
return mlir::failure();
}
for (auto it = dimension_to_splits_map->rbegin();
it != dimension_to_splits_map->rend(); ++it) {
int concat_dimension = it->first;
int num_splits = it->second;
llvm::SmallVector<mlir::Value, 4> new_outputs;
new_outputs.reserve(num_splits);
for (int i = 0, end = outputs_to_merge.size(); i < end;
i = i + num_splits) {
mlir::TF::ConcatOp concat_op =
CreateConcatOp(concat_dimension, location,
llvm::ArrayRef<mlir::Value>{
outputs_to_merge.begin() + i,
outputs_to_merge.begin() + i + num_splits},
builder);
new_outputs.emplace_back(concat_op.getResult());
}
std::swap(new_outputs, outputs_to_merge);
}
assert(outputs_to_merge.size() == 1);
cluster_func_output.replaceAllUsesWith(outputs_to_merge[0]);
return mlir::success();
}
mlir::LogicalResult ValidateAndGetTiledExecuteOutputShape(
const mlir::Location& location,
const mlir::TensorType cluster_func_output_type,
const xla::OpSharding& output_sharding,
mlir::Type* tiled_logical_computation_type) {
const auto output_shape = cluster_func_output_type.getShape();
auto new_output_shape = llvm::to_vector<4>(output_shape);
auto dimension_to_splits_map =
GetDimensionIndicesAndNumSplitsFromSharding(output_sharding);
if (!dimension_to_splits_map.ok()) {
LOG(ERROR) << dimension_to_splits_map.status();
return mlir::failure();
}
for (const auto& dimension_and_output_splits : *dimension_to_splits_map) {
const auto dimension = dimension_and_output_splits.first;
const auto output_splits = dimension_and_output_splits.second;
if (output_shape[dimension] == mlir::ShapedType::kDynamic) {
*tiled_logical_computation_type = cluster_func_output_type;
break;
}
if (output_shape[dimension] % output_splits != 0) {
mlir::emitError(
location,
llvm::formatv("incorrect output sharding received. "
"{0}-th dimension of the output must be "
"evenly divisible by {1}, got dimension "
"shape {2}",
dimension, output_splits, output_shape[dimension]));
return mlir::failure();
}
new_output_shape[dimension] = output_shape[dimension] / output_splits;
}
*tiled_logical_computation_type = mlir::RankedTensorType::get(
new_output_shape, cluster_func_output_type.getElementType());
return mlir::success();
} | #include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include <string>
#include <gtest/gtest.h>
#include "mlir/Support/LogicalResult.h"
#include "xla/xla_data.pb.h"
inline constexpr llvm::StringRef kXlaShardingAttrName = "_XlaSharding";
namespace tensorflow {
namespace {
TEST(DecodeShardingAttributeTest, CheckInvalidString) {
xla::OpSharding sharding;
EXPECT_TRUE(DecodeShardingAttribute("", sharding).succeeded());
EXPECT_TRUE(DecodeShardingAttribute("manual", sharding).failed());
}
TEST(DecodeShardingAttributeTest, CheckManualShardString) {
xla::OpSharding sharding;
EXPECT_TRUE(DecodeShardingAttribute("{manual}", sharding).succeeded());
EXPECT_TRUE(sharding.type() == sharding.MANUAL);
EXPECT_EQ(0, sharding.tile_assignment_devices_size());
}
TEST(DecodeShardingAttributeTest, CheckMaximalShardString) {
xla::OpSharding sharding;
EXPECT_TRUE(
DecodeShardingAttribute("{maximal device=0}", sharding).succeeded());
EXPECT_TRUE(sharding.type() == sharding.MAXIMAL);
EXPECT_EQ(1, sharding.tile_assignment_devices_size());
}
}
} |
1,229 | cpp | tensorflow/tensorflow | topological_sort | tensorflow/core/grappler/utils/topological_sort.cc | tensorflow/core/grappler/utils/topological_sort_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_TOPOLOGICAL_SORT_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_TOPOLOGICAL_SORT_H_
#include "absl/types/span.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
struct TopologicalDependency {
TopologicalDependency(const NodeDef* from, const NodeDef* to)
: from(from), to(to) {}
const NodeDef* from;
const NodeDef* to;
};
Status ComputeTopologicalOrder(
const GraphDef& graph,
absl::Span<const TopologicalDependency> extra_dependencies,
std::vector<const NodeDef*>* topo_order);
Status ComputeTopologicalOrder(const GraphDef& graph,
std::vector<const NodeDef*>* topo_order);
Status TopologicalSort(GraphDef* graph);
Status ReversedTopologicalSort(GraphDef* graph);
}
}
#endif
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include <algorithm>
#include <deque>
#include <unordered_map>
#include "absl/types/span.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
namespace {
std::vector<GraphView::Edge> MakeEphemeralEdges(
const absl::Span<const TopologicalDependency> extra_dependencies) {
std::vector<GraphView::Edge> ephemeral_edges;
ephemeral_edges.reserve(extra_dependencies.size());
for (const auto& dep : extra_dependencies) {
ephemeral_edges.emplace_back(
GraphView::OutputPort(dep.from, Graph::kControlSlot),
GraphView::InputPort(dep.to, Graph::kControlSlot));
}
return ephemeral_edges;
}
Status ComputeTopologicalOrder(
const GraphDef& graph,
const absl::Span<const TopologicalDependency> extra_dependencies,
std::vector<int>* ready_nodes) {
GraphTopologyView graph_view;
TF_RETURN_IF_ERROR(graph_view.InitializeFromGraph(
graph, MakeEphemeralEdges(extra_dependencies)));
std::vector<int> num_ready_inputs(graph.node_size(), 0);
ready_nodes->reserve(graph.node_size());
int front = 0;
int back = 0;
for (int i = 0; i < graph.node_size(); i++) {
if (graph_view.GetFanin(i).empty()) {
ready_nodes->push_back(i);
back++;
}
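// Merge nodes get their NextIteration inputs pre-counted as ready; without
// this, every while-loop back edge would make the graph look cyclic and the
// sort would stall.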
if (IsMerge(graph.node(i))) {
for (int input : graph_view.GetFanin(i)) {
if (IsNextIteration(graph.node(input))) {
num_ready_inputs[i]++;
}
}
}
}
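// Kahn's algorithm: pop ready nodes in FIFO order and mark a fanout ready
// once all of its inputs have been processed. `ready_nodes` doubles as the
// work queue and as the resulting topological order.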
while (front != back) {
int ready_node = (*ready_nodes)[front];
for (int fanout : graph_view.GetFanout(ready_node)) {
++num_ready_inputs[fanout];
const int max_size = graph_view.GetFanin(fanout).size();
if (num_ready_inputs[fanout] == max_size) {
ready_nodes->push_back(fanout);
++back;
}
}
++front;
}
if (back != graph_view.num_nodes()) {
if (VLOG_IS_ON(1)) {
VLOG(1) << "The graph couldn't be sorted in topological order. Stalled "
"at node = "
<< graph.node(back).DebugString();
for (int i = 0; i < graph_view.num_nodes(); ++i) {
const int max_size = graph_view.GetFanin(i).size();
if (num_ready_inputs[i] != max_size) {
VLOG(1) << "Node not ready: " << graph.node(i).DebugString();
}
}
}
return errors::InvalidArgument(
"The graph couldn't be sorted in topological order.");
}
return absl::OkStatus();
}
}
Status ComputeTopologicalOrder(
const GraphDef& graph,
const absl::Span<const TopologicalDependency> extra_dependencies,
std::vector<const NodeDef*>* topo_order) {
std::vector<int> ready_nodes;
TF_RETURN_IF_ERROR(
ComputeTopologicalOrder(graph, extra_dependencies, &ready_nodes));
topo_order->reserve(ready_nodes.size());
for (int ready_node_idx : ready_nodes) {
topo_order->emplace_back(&graph.node(ready_node_idx));
}
return absl::OkStatus();
}
Status ComputeTopologicalOrder(const GraphDef& graph,
std::vector<const NodeDef*>* topo_order) {
return ComputeTopologicalOrder(graph, {}, topo_order);
}
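// Usage sketch (`graph` stands for any valid GraphDef): an extra dependency
// acts as a temporary control edge that constrains only the sort itself.
//
//   std::vector<const NodeDef*> topo_order;
//   TopologicalDependency dep(&graph.node(0), &graph.node(1));
//   Status s = ComputeTopologicalOrder(graph, {dep}, &topo_order);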
Status ReversedTopologicalSort(GraphDef* graph) {
std::vector<int> ready_nodes;
TF_RETURN_IF_ERROR(ComputeTopologicalOrder(*graph, {}, &ready_nodes));
std::reverse(ready_nodes.begin(), ready_nodes.end());
PermuteNodesInPlace(graph, &ready_nodes, true);
return absl::OkStatus();
}
Status TopologicalSort(GraphDef* graph) {
std::vector<int> ready_nodes;
TF_RETURN_IF_ERROR(ComputeTopologicalOrder(*graph, {}, &ready_nodes));
PermuteNodesInPlace(graph, &ready_nodes, true);
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/utils/topological_sort.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/benchmark_testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace grappler {
class TopologicalSortTest : public ::testing::Test {
protected:
struct NodeConfig {
NodeConfig(string name, std::vector<string> inputs)
: name(std::move(name)), inputs(std::move(inputs)) {}
NodeConfig(string name, string op, std::vector<string> inputs)
: name(std::move(name)), op(std::move(op)), inputs(std::move(inputs)) {}
string name;
string op;
std::vector<string> inputs;
};
static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
GraphDef graph;
for (const NodeConfig& node : nodes) {
NodeDef node_def;
node_def.set_name(node.name);
node_def.set_op(node.op);
for (const string& input : node.inputs) {
node_def.add_input(input);
}
*graph.add_node() = std::move(node_def);
}
return graph;
}
};
TEST_F(TopologicalSortTest, NoLoop) {
GraphDef graph = CreateGraph({
{"2", {"5"}},
{"0", {"5", "4"}},
{"1", {"4", "3"}},
{"3", {"2"}},
{"5", {}},
{"4", {}}
});
std::vector<const NodeDef*> topo_order;
TF_EXPECT_OK(ComputeTopologicalOrder(graph, &topo_order));
const std::vector<string> order = {"5", "4", "2", "0", "3", "1"};
ASSERT_EQ(topo_order.size(), order.size());
for (int i = 0; i < topo_order.size(); ++i) {
const NodeDef* node = topo_order[i];
EXPECT_EQ(node->name(), order[i]);
}
TF_EXPECT_OK(TopologicalSort(&graph));
for (int i = 0; i < topo_order.size(); i++) {
EXPECT_EQ(graph.node(i).name(), order[i]);
}
}
TEST_F(TopologicalSortTest, WithLoop) {
GraphDef graph = CreateGraph({
{"2", "Merge", {"1", "5"}},
{"3", "Switch", {"2"}},
{"4", "Identity", {"3"}},
{"5", "NextIteration", {"4"}},
{"1", {}}
});
std::vector<const NodeDef*> topo_order;
TF_EXPECT_OK(ComputeTopologicalOrder(graph, &topo_order));
const std::vector<string> order = {"1", "2", "3", "4", "5"};
ASSERT_EQ(topo_order.size(), order.size());
for (int i = 0; i < topo_order.size(); ++i) {
const NodeDef* node = topo_order[i];
EXPECT_EQ(node->name(), order[i]);
}
TF_EXPECT_OK(TopologicalSort(&graph));
for (int i = 0; i < order.size(); i++) {
EXPECT_EQ(graph.node(i).name(), order[i]);
}
}
TEST_F(TopologicalSortTest, WithIllegalLoop) {
GraphDef graph = CreateGraph({
{"2", {"1", "3"}},
{"3", {"2"}},
{"1", {}}
});
EXPECT_FALSE(TopologicalSort(&graph).ok());
std::vector<string> order = {"2", "3", "1"};
for (int i = 0; i < order.size(); i++) {
EXPECT_EQ(graph.node(i).name(), order[i]);
}
}
TEST_F(TopologicalSortTest, DuplicatedInputs) {
GraphDef graph = CreateGraph({
{"2", {"1", "1"}},
{"1", {}}
});
TF_EXPECT_OK(TopologicalSort(&graph));
std::vector<string> order = {"1", "2"};
for (int i = 0; i < order.size(); i++) {
EXPECT_EQ(graph.node(i).name(), order[i]);
}
}
TEST_F(TopologicalSortTest, Idempotent) {
GraphDef graph = CreateGraph({
{"1", {}},
{"2", {}},
{"3", {"1", "2"}},
{"4", {"1", "3"}},
{"5", {"2", "3"}}
});
TF_EXPECT_OK(TopologicalSort(&graph));
std::vector<string> order = {"1", "2", "3", "4", "5"};
for (int i = 0; i < order.size(); i++) {
EXPECT_EQ(graph.node(i).name(), order[i]);
}
TF_EXPECT_OK(TopologicalSort(&graph));
for (int i = 0; i < order.size(); i++) {
EXPECT_EQ(graph.node(i).name(), order[i]);
}
}
TEST_F(TopologicalSortTest, ExtraDependencies) {
GraphDef graph = CreateGraph({
{"2", {"5"}},
{"0", {"5", "4"}},
{"1", {"4", "3"}},
{"3", {"2"}},
{"5", {}},
{"4", {}}
});
std::vector<TopologicalDependency> extra_dependencies;
extra_dependencies.push_back({&graph.node(5), &graph.node(4)});
std::vector<const NodeDef*> topo_order;
TF_EXPECT_OK(ComputeTopologicalOrder(graph, extra_dependencies, &topo_order));
const std::vector<string> valid_order_1 = {"4", "5", "2", "0", "3", "1"};
const std::vector<string> valid_order_2 = {"4", "5", "0", "2", "3", "1"};
ASSERT_EQ(topo_order.size(), valid_order_1.size());
std::vector<string> computed_order(6, "");
for (int i = 0; i < topo_order.size(); ++i) {
const NodeDef* node = topo_order[i];
computed_order[i] = node->name();
}
EXPECT_TRUE(computed_order == valid_order_1 ||
computed_order == valid_order_2);
extra_dependencies.push_back({&graph.node(1), &graph.node(5)});
EXPECT_FALSE(
ComputeTopologicalOrder(graph, extra_dependencies, &topo_order).ok());
}
static void BM_ComputeTopologicalOrder(::testing::benchmark::State& state) {
const int size = state.range(0);
GraphDef graph = test::CreateRandomGraph(size);
std::vector<const NodeDef*> topo_order;
for (auto s : state) {
topo_order.clear();
Status st = ComputeTopologicalOrder(graph, &topo_order);
CHECK(st.ok()) << "Failed to compute topological order";
}
}
BENCHMARK(BM_ComputeTopologicalOrder)
->Arg(10)
->Arg(100)
->Arg(1000)
->Arg(10000)
->Arg(25000)
->Arg(50000)
->Arg(100000);
}
} |
1,230 | cpp | tensorflow/tensorflow | error_util | third_party/xla/xla/mlir/utils/error_util.cc | third_party/xla/xla/mlir/utils/error_util_test.cc | #ifndef XLA_MLIR_UTILS_ERROR_UTIL_H_
#define XLA_MLIR_UTILS_ERROR_UTIL_H_
#include <string>
#include "absl/status/status.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
namespace mlir {
class BaseScopedDiagnosticHandler : public SourceMgrDiagnosticHandler {
public:
explicit BaseScopedDiagnosticHandler(MLIRContext* context,
bool propagate = false,
bool filter_stack = false);
~BaseScopedDiagnosticHandler();
bool ok() const;
absl::Status ConsumeStatus();
absl::Status Combine(absl::Status status);
protected:
LogicalResult handler(Diagnostic* diag);
std::string diag_str_;
llvm::raw_string_ostream diag_stream_;
llvm::SourceMgr source_mgr_;
bool propagate_;
};
}
#endif
#include "xla/mlir/utils/error_util.h"
#include <string>
#include <string_view>
#include "tsl/platform/errors.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Diagnostics.h"
namespace mlir {
BaseScopedDiagnosticHandler::BaseScopedDiagnosticHandler(MLIRContext* context,
bool propagate,
bool filter_stack)
: SourceMgrDiagnosticHandler(source_mgr_, context, diag_stream_),
diag_stream_(diag_str_),
propagate_(propagate) {}
BaseScopedDiagnosticHandler::~BaseScopedDiagnosticHandler() {
bool all_errors_produced_were_consumed = ok();
DCHECK(all_errors_produced_were_consumed) << "Error status not consumed:\n"
<< diag_str_;
}
bool BaseScopedDiagnosticHandler::ok() const { return diag_str_.empty(); }
absl::Status BaseScopedDiagnosticHandler::ConsumeStatus() {
if (ok()) return absl::OkStatus();
absl::Status s = absl::UnknownError(diag_str_);
diag_str_.clear();
return s;
}
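// Merges an incoming status with any captured diagnostics; when both carry
// errors, the diagnostic text is appended to the status message and the
// internal buffer is cleared.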
absl::Status BaseScopedDiagnosticHandler::Combine(absl::Status status) {
if (status.ok()) return ConsumeStatus();
if (ok()) return status;
std::string str_status_message(status.message());
status = absl::Status(status.code(), str_status_message + diag_str_);
diag_str_.clear();
return status;
}
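// Streams the diagnostic into the accumulated string; non-error diagnostics
// are only VLOG'ed and then trimmed back off, so ok()/ConsumeStatus() reflect
// errors only.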
LogicalResult BaseScopedDiagnosticHandler::handler(Diagnostic* diag) {
size_t current_diag_str_size_ = diag_str_.size();
emitDiagnostic(*diag);
diag_stream_.flush();
if (diag->getSeverity() != DiagnosticSeverity::Error) {
VLOG(1) << diag_str_.substr(current_diag_str_size_);
diag_str_.resize(current_diag_str_size_);
}
return failure(propagate_);
}
} | #include "xla/mlir/utils/error_util.h"
#include <string>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "llvm/ADT/Twine.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
namespace mlir {
namespace {
TEST(ErrorUtilTest, BaseScopedDiagnosticHandler) {
MLIRContext context;
auto id = StringAttr::get(&context, "
auto loc = FileLineColLoc::get(&context, id, 0, 0);
{
TF_EXPECT_OK(
BaseScopedDiagnosticHandler(&context).Combine(absl::OkStatus()));
}
{
BaseScopedDiagnosticHandler handler(&context);
emitError(loc) << "Diagnostic message";
ASSERT_TRUE(absl::IsUnknown(handler.ConsumeStatus()));
}
{
absl::Status err = absl::InternalError("Passed in error");
ASSERT_TRUE(
absl::IsInternal(BaseScopedDiagnosticHandler(&context).Combine(err)));
}
{
auto function = [&]() {
emitError(loc) << "Diagnostic message reported";
emitError(loc) << "Second diagnostic message reported";
return absl::InternalError("Passed in error");
};
BaseScopedDiagnosticHandler ssdh(&context);
absl::Status s = ssdh.Combine(function());
ASSERT_TRUE(absl::IsInternal(s));
EXPECT_TRUE(absl::StrContains(s.message(), "Passed in error"));
EXPECT_TRUE(absl::StrContains(s.message(), "Diagnostic message reported"));
EXPECT_TRUE(
absl::StrContains(s.message(), "Second diagnostic message reported"));
}
}
}
} |
1,231 | cpp | tensorflow/tensorflow | bridge_logger | tensorflow/compiler/mlir/tensorflow/utils/bridge_logger.cc | tensorflow/compiler/mlir/tensorflow/utils/bridge_logger_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_BRIDGE_LOGGER_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_BRIDGE_LOGGER_H_
#include <string>
#include <vector>
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/Timing.h"
namespace tensorflow {
class BridgeLoggerConfig : public mlir::PassManager::IRPrinterConfig {
public:
explicit BridgeLoggerConfig(
bool print_module_scope = false, bool print_after_only_on_change = true,
mlir::OpPrintingFlags op_printing_flags = mlir::OpPrintingFlags());
void printBeforeIfEnabled(mlir::Pass* pass, mlir::Operation* op,
PrintCallbackFn print_callback) override;
void printAfterIfEnabled(mlir::Pass* pass, mlir::Operation* op,
PrintCallbackFn print_callback) override;
bool ShouldPrint(mlir::Pass* pass, mlir::Operation* op);
private:
static std::vector<std::string> GetFilter(const std::string& env_var);
static bool MatchesFilter(const std::string& str,
const std::vector<std::string>& filter,
bool exact_match);
const std::vector<std::string> pass_filter_;
const std::vector<std::string> string_filter_;
};
}
#endif
#include "tensorflow/compiler/mlir/tensorflow/utils/bridge_logger.h"
#include <atomic>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/str_split.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
namespace tensorflow {
static std::atomic<int> log_counter(0);
BridgeLoggerConfig::BridgeLoggerConfig(bool print_module_scope,
bool print_after_only_on_change,
mlir::OpPrintingFlags op_printing_flags)
: mlir::PassManager::IRPrinterConfig(
print_module_scope, print_after_only_on_change,
false, op_printing_flags),
pass_filter_(GetFilter("MLIR_BRIDGE_LOG_PASS_FILTER")),
string_filter_(GetFilter("MLIR_BRIDGE_LOG_STRING_FILTER")) {}
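// Dumps the current IR to a uniquely numbered file named after the pass and a
// "before"/"after" suffix.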
inline static void Log(BridgeLoggerConfig::PrintCallbackFn print_callback,
mlir::Pass* pass, mlir::Operation* op,
llvm::StringRef file_suffix) {
std::string pass_name = pass->getName().str();
std::string name = llvm::formatv("{0,0+4}_mlir_bridge_{1}_{2}", log_counter++,
pass_name, file_suffix);
std::unique_ptr<llvm::raw_ostream> os;
std::string filepath;
if (CreateFileForDumping(name, &os, &filepath).ok()) {
print_callback(*os);
LOG(INFO) << "Dumped MLIR module to " << filepath;
}
}
void BridgeLoggerConfig::printBeforeIfEnabled(mlir::Pass* pass,
mlir::Operation* op,
PrintCallbackFn print_callback) {
if (ShouldPrint(pass, op)) Log(print_callback, pass, op, "before");
}
void BridgeLoggerConfig::printAfterIfEnabled(mlir::Pass* pass,
mlir::Operation* op,
PrintCallbackFn print_callback) {
if (ShouldPrint(pass, op)) Log(print_callback, pass, op, "after");
}
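// Reads a ';'-separated list of filter strings from the given environment
// variable; returns an empty list if the variable is unset.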
std::vector<std::string> BridgeLoggerConfig::GetFilter(
const std::string& env_var) {
std::vector<std::string> filter;
const char* filter_str = getenv(env_var.c_str());
if (filter_str) {
filter = absl::StrSplit(filter_str, ';', absl::SkipWhitespace());
}
return filter;
}
bool BridgeLoggerConfig::MatchesFilter(const std::string& str,
const std::vector<std::string>& filter,
bool exact_match) {
if (filter.empty()) return true;
for (const std::string& filter_str : filter) {
if (str == filter_str) return true;
if (!exact_match && str.find(filter_str) != std::string::npos) return true;
}
return false;
}
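// A pass invocation is logged only if the pass name exactly matches the pass
// filter and the serialized op contains one of the string-filter substrings
// (an empty filter matches everything).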
bool BridgeLoggerConfig::ShouldPrint(mlir::Pass* pass, mlir::Operation* op) {
std::string pass_name = pass->getName().str();
if (!MatchesFilter(pass_name, pass_filter_, true)) {
VLOG(1) << "Not logging invocation of pass `" << pass_name
<< "` because the pass name does not match any string in "
"`MLIR_BRIDGE_LOG_PASS_FILTER`";
return false;
}
if (!string_filter_.empty()) {
std::string serialized_op;
llvm::raw_string_ostream os(serialized_op);
op->print(os);
if (!MatchesFilter(serialized_op, string_filter_, false)) {
VLOG(1) << "Not logging invocation of pass `" << pass_name
<< "` because the serialized operation on which the pass is "
"invoked does not contain any of the strings specified by "
"MLIR_BRIDGE_LOG_STRING_FILTER";
return false;
}
}
return true;
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/bridge_logger.h"
#include <memory>
#include "mlir/IR/MLIRContext.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static const char *const module_with_add =
R"(module {
func.func @main(%arg0: tensor<3x4x5xf32>, %arg1: tensor<3x4x5xf32>) -> tensor<3x4x5xf32> {
%0 = "tf.AddV2"(%arg0, %arg1) : (tensor<3x4x5xf32>, tensor<3x4x5xf32>) -> tensor<3x4x5xf32>
func.return %0 : tensor<3x4x5xf32>
}
}
)";
static const char *const module_with_sub =
R"(module {
func.func @main(%arg0: tensor<7x8x9xi8>, %arg1: tensor<7x8x9xi8>) -> tensor<7x8x9xi8> {
%0 = "tf.Sub"(%arg0, %arg1) : (tensor<7x8x9xi8>, tensor<7x8x9xi8>) -> tensor<7x8x9xi8>
func.return %0 : tensor<7x8x9xi8>
}
}
)";
TEST(BridgeLoggerFilters, TestPassFilter) {
mlir::DialectRegistry mlir_registry;
mlir::RegisterAllTensorFlowDialects(mlir_registry);
mlir::MLIRContext mlir_context(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module_with_add;
TF_ASSERT_OK(DeserializeMlirModule(module_with_add, &mlir_context,
&mlir_module_with_add));
std::unique_ptr<mlir::Pass> partitioning_pass =
mlir::TFTPU::CreateTPUResourceReadsWritesPartitioningPass();
std::unique_ptr<mlir::Pass> shape_inference_pass =
mlir::TF::CreateTFShapeInferencePass();
std::unique_ptr<mlir::Pass> inliner_pass = mlir::createInlinerPass();
setenv("MLIR_BRIDGE_LOG_PASS_FILTER",
"TPUResourceReadsWritesPartitioningPass;TensorFlowShapeInferencePass",
1);
BridgeLoggerConfig logger_config;
EXPECT_TRUE(logger_config.ShouldPrint(partitioning_pass.get(),
mlir_module_with_add.get()));
EXPECT_TRUE(logger_config.ShouldPrint(shape_inference_pass.get(),
mlir_module_with_add.get()));
EXPECT_FALSE(logger_config.ShouldPrint(inliner_pass.get(),
mlir_module_with_add.get()));
}
TEST(BridgeLoggerFilters, TestStringFilter) {
mlir::DialectRegistry mlir_registry;
mlir::RegisterAllTensorFlowDialects(mlir_registry);
mlir::MLIRContext mlir_context(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module_with_add, mlir_module_with_sub;
TF_ASSERT_OK(DeserializeMlirModule(module_with_add, &mlir_context,
&mlir_module_with_add));
TF_ASSERT_OK(DeserializeMlirModule(module_with_sub, &mlir_context,
&mlir_module_with_sub));
std::unique_ptr<mlir::Pass> dummy_pass =
mlir::TF::CreateTFShapeInferencePass();
setenv("MLIR_BRIDGE_LOG_STRING_FILTER", "func @main(%arg0: tensor;XXX", 1);
BridgeLoggerConfig logger_config1;
EXPECT_TRUE(
logger_config1.ShouldPrint(dummy_pass.get(), mlir_module_with_add.get()));
EXPECT_TRUE(
logger_config1.ShouldPrint(dummy_pass.get(), mlir_module_with_sub.get()));
setenv("MLIR_BRIDGE_LOG_STRING_FILTER", "func @main(%arg0:tensor;XXX", 1);
BridgeLoggerConfig logger_config2;
EXPECT_FALSE(
logger_config2.ShouldPrint(dummy_pass.get(), mlir_module_with_add.get()));
EXPECT_FALSE(
logger_config2.ShouldPrint(dummy_pass.get(), mlir_module_with_sub.get()));
setenv("MLIR_BRIDGE_LOG_STRING_FILTER",
"\"tf.AddV2\"(%arg0, %arg1) : (tensor<3x4x5xf32>", 1);
BridgeLoggerConfig logger_config3;
EXPECT_TRUE(
logger_config3.ShouldPrint(dummy_pass.get(), mlir_module_with_add.get()));
EXPECT_FALSE(
logger_config3.ShouldPrint(dummy_pass.get(), mlir_module_with_sub.get()));
}
TEST(BridgeLoggerFilters, TestBothFilters) {
mlir::DialectRegistry mlir_registry;
mlir::RegisterAllTensorFlowDialects(mlir_registry);
mlir::MLIRContext mlir_context(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module_with_add;
TF_ASSERT_OK(DeserializeMlirModule(module_with_add, &mlir_context,
&mlir_module_with_add));
std::unique_ptr<mlir::Pass> shape_inference_pass =
mlir::TF::CreateTFShapeInferencePass();
setenv("MLIR_BRIDGE_LOG_STRING_FILTER",
"(tensor<3x4x5xf32>, tensor<3x4x5xf32>) -> tensor<3x4x5xf32>", 1);
setenv("MLIR_BRIDGE_LOG_PASS_FILTER", "ensorFlowShapeInferencePass", 1);
BridgeLoggerConfig logger_config1;
EXPECT_FALSE(logger_config1.ShouldPrint(shape_inference_pass.get(),
mlir_module_with_add.get()));
setenv("MLIR_BRIDGE_LOG_STRING_FILTER", "XXX", 1);
setenv("MLIR_BRIDGE_LOG_PASS_FILTER", "TensorFlowShapeInferencePass", 1);
BridgeLoggerConfig logger_config2;
EXPECT_FALSE(logger_config2.ShouldPrint(shape_inference_pass.get(),
mlir_module_with_add.get()));
setenv("MLIR_BRIDGE_LOG_STRING_FILTER",
"(tensor<3x4x5xf32>, tensor<3x4x5xf32>) -> tensor<3x4x5xf32>", 1);
setenv("MLIR_BRIDGE_LOG_PASS_FILTER", "TensorFlowShapeInferencePass", 1);
BridgeLoggerConfig logger_config3;
EXPECT_TRUE(logger_config3.ShouldPrint(shape_inference_pass.get(),
mlir_module_with_add.get()));
}
}
} |
1,232 | cpp | tensorflow/tensorflow | convert_tensor | tensorflow/core/ir/importexport/convert_tensor.cc | tensorflow/compiler/mlir/tensorflow/utils/convert_tensor_test.cc | #ifndef TENSORFLOW_CORE_IR_IMPORTEXPORT_CONVERT_TENSOR_H_
#define TENSORFLOW_CORE_IR_IMPORTEXPORT_CONVERT_TENSOR_H_
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/types/dialect.h"
#include "tensorflow/core/platform/statusor.h"
namespace mlir {
namespace tfg {
absl::StatusOr<ElementsAttr> ConvertTensorProto(
const tensorflow::TensorProto& input_tensor, Builder builder);
absl::StatusOr<ElementsAttr> ConvertTensor(
const tensorflow::Tensor& input_tensor, Builder builder);
void ConvertToTensorShapeProto(ArrayRef<int64_t> shape,
tensorflow::TensorShapeProto* output_shape);
tensorflow::PartialTensorShape ConvertTypeToTensorShape(const Type& type);
absl::StatusOr<ShapeAttr> ConvertTensorShapeProto(
const tensorflow::TensorShapeProto& shape, MLIRContext* context);
template <typename ShapeContainerT>
void SetTensorShapeProto(ShapeContainerT shape,
tensorflow::TensorShapeProto* proto) {
if (shape.hasRank()) {
for (int64_t dim : shape.getShape()) {
proto->add_dim()->set_size(mlir::ShapedType::isDynamic(dim) ? -1 : dim);
}
} else {
proto->set_unknown_rank(true);
}
}
tensorflow::Status ConvertToTensorProto(ElementsAttr attr,
tensorflow::TensorProto* output_tensor);
tensorflow::Status ConvertToTensor(ElementsAttr attr,
tensorflow::Tensor* output_tensor);
llvm::SmallVector<int64_t> ConvertTFShapeToMlir(llvm::ArrayRef<int64_t> shape);
llvm::SmallVector<int64_t> ConvertMlirShapeToTF(llvm::ArrayRef<int64_t> shape);
mlir::RankedTensorType GetTypeFromTFTensorShape(llvm::ArrayRef<int64_t> shape,
mlir::Type elementType,
mlir::Attribute encoding = {});
}
}
#endif
#include "tensorflow/core/ir/importexport/convert_tensor.h"
#include <optional>
#include <string>
#include <vector>
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/importexport/convert_types.h"
#include "tensorflow/core/ir/importexport/mangling.h"
#include "tensorflow/core/ir/types/dialect.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/tstring.h"
#include "tsl/platform/ml_dtypes.h"
namespace mlir {
namespace tfg {
using tensorflow::bfloat16;
using tensorflow::PartialTensorShape;
using tensorflow::Status;
using tensorflow::Tensor;
using tensorflow::TensorProto;
using tensorflow::TensorShape;
using tensorflow::TensorShapeProto;
using tensorflow::tstring;
using tensorflow::errors::InvalidArgument;
using tensorflow::errors::Unimplemented;
using tensorflow::port::CopyFromArray;
using tensorflow::protobuf::RepeatedField;
using tensorflow::protobuf::RepeatedPtrField;
static TensorProto ConvertToProto(const Tensor& input_tensor,
bool use_tensor_content = true) {
TensorProto tensor_proto;
if (use_tensor_content)
input_tensor.AsProtoTensorContent(&tensor_proto);
else
input_tensor.AsProtoField(&tensor_proto);
return tensor_proto;
}
static std::string MangleTensor(const Tensor& tensor) {
return mangling_util::MangleTensor(ConvertToProto(tensor));
}
template <typename T>
absl::StatusOr<ElementsAttr> ConvertFlatTensor(const Tensor& input_tensor,
ShapedType type) {
auto arr = input_tensor.flat<T>();
return ElementsAttr(
DenseElementsAttr::get(type, llvm::ArrayRef(arr.data(), arr.size())));
}
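// bfloat16 and half tensors are converted by reinterpreting the tensor's raw
// bytes as the attribute's backing buffer.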
ElementsAttr ConvertBf16Tensor(const Tensor& input_tensor,
RankedTensorType type) {
auto buffer = llvm::ArrayRef(static_cast<char*>(input_tensor.data()),
input_tensor.TotalBytes());
return DenseElementsAttr::getFromRawBuffer(type, buffer);
}
ElementsAttr ConvertHalfTensor(const Tensor& tensor, RankedTensorType type) {
auto buffer =
llvm::ArrayRef(static_cast<char*>(tensor.data()), tensor.TotalBytes());
return DenseElementsAttr::getFromRawBuffer(type, buffer);
}
absl::StatusOr<ElementsAttr> ConvertStringTensor(const Tensor& input_tensor,
ShapedType type) {
auto arr = input_tensor.flat<tstring>();
std::vector<StringRef> string_refs;
string_refs.reserve(arr.size());
for (int i = 0; i < arr.size(); i++) {
const auto& val = arr(i);
string_refs.push_back({val.data(), val.size()});
}
return ElementsAttr(DenseStringElementsAttr::get(type, string_refs));
}
absl::StatusOr<ElementsAttr> ConvertTensor(const Tensor& input_tensor,
Builder builder) {
const auto& input_dtype = input_tensor.dtype();
const auto& input_shape = input_tensor.shape();
Type elt_type;
TF_RETURN_IF_ERROR(ConvertDataType(input_dtype, builder, &elt_type));
SmallVector<int64_t, 4> shape;
ConvertToMlirShape(input_shape, &shape);
auto type = RankedTensorType::get(shape, elt_type);
#define CONVERT_FLAT(DTYPE, CTYPE) \
case tensorflow::DTYPE: \
return ConvertFlatTensor<CTYPE>(input_tensor, type);
switch (input_dtype) {
CONVERT_FLAT(DT_BOOL, bool)
CONVERT_FLAT(DT_FLOAT, float)
CONVERT_FLAT(DT_DOUBLE, double)
CONVERT_FLAT(DT_INT8, int8_t)
CONVERT_FLAT(DT_INT16, int16_t)
CONVERT_FLAT(DT_INT32, int32_t)
CONVERT_FLAT(DT_INT64, int64_t)
CONVERT_FLAT(DT_UINT8, uint8_t)
CONVERT_FLAT(DT_UINT16, uint16_t)
CONVERT_FLAT(DT_UINT32, uint32_t)
CONVERT_FLAT(DT_UINT64, uint64_t)
CONVERT_FLAT(DT_COMPLEX64, std::complex<float>)
CONVERT_FLAT(DT_COMPLEX128, std::complex<double>)
case tensorflow::DT_BFLOAT16:
return ConvertBf16Tensor(input_tensor, type);
case tensorflow::DT_HALF:
return ConvertHalfTensor(input_tensor, type);
case tensorflow::DT_STRING:
return ConvertStringTensor(input_tensor, type);
default:
return ElementsAttr(
tf_type::TensorProtoAttr::get(type, MangleTensor(input_tensor)));
}
#undef CONVERT_FLAT
}
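// Returns how many values are stored in the proto's typed repeated field, or
// -1 when tensor_content is set or the dtype has no such field.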
static int NumberOfMaterializedElements(const TensorProto& tensor) {
if (!tensor.tensor_content().empty()) return -1;
#define MATCH(DTYPE, FIELD) \
case tensorflow::DTYPE: \
return tensor.FIELD##_val().size()
switch (tensor.dtype()) {
MATCH(DT_FLOAT, float);
MATCH(DT_DOUBLE, double);
MATCH(DT_INT8, int);
MATCH(DT_UINT8, int);
MATCH(DT_INT16, int);
MATCH(DT_UINT16, int);
MATCH(DT_INT32, int);
MATCH(DT_UINT32, uint32);
MATCH(DT_INT64, int64);
MATCH(DT_UINT64, uint64);
MATCH(DT_BOOL, bool);
MATCH(DT_HALF, half);
MATCH(DT_BFLOAT16, half);
MATCH(DT_STRING, string);
case tensorflow::DT_COMPLEX64:
case tensorflow::DT_COMPLEX128:
default:
return -1;
}
}
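// When the proto holds a single materialized value for a multi-element shape,
// builds a splat attribute directly instead of expanding the full tensor.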
absl::StatusOr<ElementsAttr> ConvertTensorProto(const TensorProto& input_tensor,
Builder builder) {
TensorShape input_tensor_shape(input_tensor.tensor_shape());
int num_elt = NumberOfMaterializedElements(input_tensor);
if ((num_elt == 1 ||
(num_elt == 0 && input_tensor.tensor_content().empty())) &&
input_tensor_shape.num_elements() > 1) {
TensorProto tensor_copy = input_tensor;
auto* shape = tensor_copy.mutable_tensor_shape();
shape->clear_dim();
shape->add_dim()->set_size(1);
TF_ASSIGN_OR_RETURN(ElementsAttr single_attr,
ConvertTensorProto(tensor_copy, builder));
std::vector<int64_t> original_dimensions;
for (auto dim : input_tensor_shape) original_dimensions.push_back(dim.size);
return ElementsAttr(SplatElementsAttr::get(
single_attr.getShapedType().clone(original_dimensions),
single_attr.getValues<Attribute>()[0]));
}
Tensor t;
if (!t.FromProto(input_tensor)) {
return InvalidArgument("Failed to parse input_tensor: ",
input_tensor.DebugString());
}
return ConvertTensor(t, builder);
}
void ConvertToTensorShapeProto(ArrayRef<int64_t> shape,
TensorShapeProto* output_shape) {
for (auto d : shape) {
output_shape->add_dim()->set_size(d);
}
}
PartialTensorShape ConvertTypeToTensorShape(const Type& type) {
if (mlir::isa<UnrankedTensorType>(type)) {
return PartialTensorShape();
}
if (auto tensor_type = mlir::dyn_cast<RankedTensorType>(type)) {
TensorShapeProto tensor_shape_proto;
ConvertToTensorShapeProto(ConvertMlirShapeToTF(tensor_type.getShape()),
&tensor_shape_proto);
return PartialTensorShape(tensor_shape_proto);
}
return TensorShape();
}
ShapeAttr ConvertTypeToTensorShapeAttr(const Type& type) {
if (mlir::isa<UnrankedTensorType>(type)) {
return ShapeAttr::get(type.getContext(), std::nullopt);
}
if (auto tensor_type = mlir::dyn_cast<RankedTensorType>(type)) {
return ShapeAttr::get(
type.getContext(),
llvm::ArrayRef(ConvertMlirShapeToTF(tensor_type.getShape())));
}
return ShapeAttr::get(type.getContext(), ArrayRef<int64_t>());
}
absl::StatusOr<ShapeAttr> ConvertTensorShapeProto(const TensorShapeProto& shape,
MLIRContext* context) {
if (shape.unknown_rank()) return ShapeAttr::get(context, std::nullopt);
SmallVector<int64_t, 4> dims;
dims.reserve(shape.dim_size());
for (const auto& dim : shape.dim()) {
dims.push_back(dim.size());
}
return ShapeAttr::get(context, llvm::ArrayRef(dims));
}
void ConvertStringElementsAttr(const DenseStringElementsAttr attr,
RepeatedPtrField<std::string>* output) {
for (const auto& val : attr.getRawStringData())
output->Add({val.data(), val.size()});
}
template <typename T>
void ConvertComplexElementsAttr(const DenseElementsAttr attr,
RepeatedField<T>* output) {
for (const auto& val : attr.getValues<std::complex<T>>()) {
output->Add(val.real());
output->Add(val.imag());
}
}
Status ConvertTensorProtoAttr(const mlir::tf_type::TensorProtoAttr attr,
TensorProto* output_tensor) {
auto mangled_tensor = attr.getValue();
absl::string_view tensor_view(mangled_tensor.data(), mangled_tensor.size());
return mangling_util::DemangleTensor(tensor_view, output_tensor);
}
template <typename T>
void ConvertElementsAttr(const DenseElementsAttr attr,
RepeatedField<T>* output) {
if (attr.isSplat()) {
if (attr.getSplatValue<T>() != T(0)) output->Add(attr.getSplatValue<T>());
} else {
output->Reserve(attr.getNumElements());
for (auto value : attr.getValues<T>()) output->AddAlreadyReserved(value);
}
}
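// For splats, at most one value is stored in the repeated field: a splat of
// +0.0 is encoded implicitly by leaving the field empty, while -0.0 must be
// kept explicitly. Non-splats are copied wholesale into tensor_content.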
template <typename T, typename Cord>
void ConvertFloatElementsAttr(const DenseElementsAttr attr,
RepeatedField<T>* output, Cord* tensor_content) {
if (attr.isSplat()) {
auto value = attr.getSplatValue<T>();
if (value != T(0) || std::signbit(value))
output->Add(attr.getSplatValue<T>());
} else {
CopyFromArray(tensor_content, attr.getRawData().data(),
attr.getRawData().size());
}
}
void ConvertHalfElementsAttr(const DenseElementsAttr attr,
RepeatedField<int>* output) {
if (attr.isSplat()) {
uint16_t bits =
Eigen::numext::bit_cast<uint16_t>(attr.getSplatValue<Eigen::half>());
if (bits != 0) {
output->Add(bits);
}
} else {
output->Reserve(attr.getNumElements());
for (const Eigen::half value : attr.getValues<Eigen::half>()) {
output->AddAlreadyReserved(Eigen::numext::bit_cast<uint16_t>(value));
}
}
}
template <typename T, typename U = T, typename Cord>
void ConvertIntElementsAttr(const DenseElementsAttr attr,
RepeatedField<T>* output, Cord* tensor_content) {
if (attr.isSplat()) {
if (attr.getSplatValue<U>() != U(0))
output->Add(static_cast<T>(attr.getSplatValue<U>()));
} else {
CopyFromArray(tensor_content, attr.getRawData().data(),
attr.getRawData().size());
}
}
template <typename T, typename U = T, typename Cord>
void ConvertUIntElementsAttr(const DenseElementsAttr attr,
RepeatedField<T>* output, Cord* tensor_content) {
if (attr.isSplat()) {
if (attr.getSplatValue<U>() != U(0))
output->Add(static_cast<T>(attr.getSplatValue<U>()));
} else {
CopyFromArray(tensor_content, attr.getRawData().data(),
attr.getRawData().size());
}
}
void ConvertBfloat16ElementsAttr(const DenseElementsAttr attr,
RepeatedField<int>* output) {
if (attr.isSplat()) {
uint16_t bits =
Eigen::numext::bit_cast<uint16_t>(attr.getSplatValue<bfloat16>());
if (bits != 0) {
output->Add(bits);
}
} else {
output->Reserve(attr.getNumElements());
for (const bfloat16 value : attr.getValues<bfloat16>()) {
output->AddAlreadyReserved(Eigen::numext::bit_cast<uint16_t>(value));
}
}
}
template <typename T>
void ConvertFloat8ElementsAttr(const DenseElementsAttr attr,
std::string* output) {
if (attr.isSplat()) {
uint8_t bits = Eigen::numext::bit_cast<uint8_t>(attr.getSplatValue<T>());
if (bits != 0) {
output->push_back(bits);
}
} else {
output->reserve(attr.getNumElements());
for (const T value : attr.getValues<T>()) {
output->push_back(Eigen::numext::bit_cast<uint8_t>(value));
}
}
}
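// Serializes an ElementsAttr back into a TensorProto, dispatching on the
// converted TensorFlow dtype.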
Status ConvertToTensorProto(const ElementsAttr attr, TensorProto* output) {
auto type = attr.getShapedType();
auto shape = type.getShape();
tensorflow::DataType output_dtype;
TF_RETURN_IF_ERROR(ConvertToDataType(type, &output_dtype));
output->set_dtype(output_dtype);
ConvertToTensorShapeProto(shape, output->mutable_tensor_shape());
if (auto tensor_attr = mlir::dyn_cast<mlir::tf_type::TensorProtoAttr>(attr))
return ConvertTensorProtoAttr(tensor_attr, output);
auto dense_attr = mlir::dyn_cast<DenseElementsAttr>(attr);
if (!dense_attr) return InvalidArgument("Unsupported elements attr");
switch (output_dtype) {
case tensorflow::DT_BOOL:
ConvertElementsAttr(dense_attr, output->mutable_bool_val());
break;
case tensorflow::DT_BFLOAT16:
ConvertBfloat16ElementsAttr(dense_attr, output->mutable_half_val());
break;
case tensorflow::DT_COMPLEX64:
ConvertComplexElementsAttr(dense_attr, output->mutable_scomplex_val());
break;
case tensorflow::DT_COMPLEX128:
ConvertComplexElementsAttr(dense_attr, output->mutable_dcomplex_val());
break;
case tensorflow::DT_DOUBLE:
ConvertFloatElementsAttr(dense_attr, output->mutable_double_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_HALF:
ConvertHalfElementsAttr(dense_attr, output->mutable_half_val());
break;
case tensorflow::DT_FLOAT:
ConvertFloatElementsAttr(dense_attr, output->mutable_float_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_FLOAT8_E5M2:
ConvertFloat8ElementsAttr<tsl::float8_e5m2>(dense_attr,
output->mutable_float8_val());
break;
case tensorflow::DT_FLOAT8_E4M3FN:
ConvertFloat8ElementsAttr<tsl::float8_e4m3fn>(
dense_attr, output->mutable_float8_val());
break;
case tensorflow::DT_INT4:
ConvertIntElementsAttr<int, tsl::int4>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_UINT4:
ConvertUIntElementsAttr<int, tsl::uint4>(
dense_attr, output->mutable_int_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_QUINT8:
case tensorflow::DT_INT8:
ConvertUIntElementsAttr<int, int8_t>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_QUINT16:
case tensorflow::DT_INT16:
ConvertIntElementsAttr<int, int16_t>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_INT32:
ConvertIntElementsAttr(dense_attr, output->mutable_int_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_INT64:
ConvertIntElementsAttr(dense_attr, output->mutable_int64_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_STRING:
ConvertStringElementsAttr(mlir::cast<DenseStringElementsAttr>(dense_attr),
output->mutable_string_val());
break;
case tensorflow::DT_UINT8:
ConvertUIntElementsAttr<int, uint8_t>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_UINT16:
ConvertUIntElementsAttr<int, uint16_t>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_UINT32:
ConvertUIntElementsAttr(dense_attr, output->mutable_uint32_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_UINT64:
ConvertUIntElementsAttr(dense_attr, output->mutable_uint64_val(),
output->mutable_tensor_content());
break;
default:
return Unimplemented(absl::StrCat("Unimplemented data type ",
DataTypeString(output_dtype)));
}
return absl::OkStatus();
}
Status ConvertToTensor(const ElementsAttr attr, Tensor* output_tensor) {
TensorProto tensor_proto;
TF_RETURN_IF_ERROR(ConvertToTensorProto(attr, &tensor_proto));
if (!output_tensor->FromProto(tensor_proto)) {
return InvalidArgument("Couldn't convert tensor proto to tensor.");
}
return absl::OkStatus();
}
llvm::SmallVector<int64_t> ConvertMlirShapeToTF(llvm::ArrayRef<int64_t> shape) {
return llvm::to_vector(llvm::map_range(shape, [](int64_t dim) {
return mlir::ShapedType::isDynamic(dim) ? -1 : dim;
}));
}
llvm::SmallVector<int64_t> ConvertTFShapeToMlir(llvm::ArrayRef<int64_t> shape) {
return llvm::to_vector(llvm::map_range(shape, [](int64_t dim) {
return dim == -1 ? mlir::ShapedType::kDynamic : dim;
}));
}
mlir::RankedTensorType GetTypeFromTFTensorShape(llvm::ArrayRef<int64_t> shape,
mlir::Type elementType,
mlir::Attribute encoding) {
return mlir::RankedTensorType::get(ConvertTFShapeToMlir(shape), elementType,
encoding);
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include <cstring>
#include <initializer_list>
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "xla/test.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/ml_dtypes.h"
namespace tensorflow {
namespace {
using ::testing::AllOf;
using ::testing::Eq;
using ::testing::IsFalse;
using ::testing::IsTrue;
using ::testing::ResultOf;
static void RegisterDialects(mlir::MLIRContext &context) {
context.loadDialect<mlir::TF::TensorFlowDialect>();
}
TEST(ConvertTypeToTensorTypeTest, UnrankedTensorType) {
mlir::MLIRContext context;
RegisterDialects(context);
mlir::Builder b(&context);
PartialTensorShape output_shape =
ConvertTypeToTensorShape(mlir::UnrankedTensorType::get(b.getF32Type()));
EXPECT_TRUE(output_shape.IsIdenticalTo(PartialTensorShape()));
}
TEST(ConvertTypeToTensorTypeTest, NonFullyDefinedRankedTensorType) {
mlir::MLIRContext context;
RegisterDialects(context);
mlir::Builder b(&context);
PartialTensorShape output_shape = ConvertTypeToTensorShape(
GetTypeFromTFTensorShape({-1, 2, 3}, b.getF32Type()));
EXPECT_TRUE(output_shape.IsIdenticalTo(PartialTensorShape({-1, 2, 3})));
}
TEST(ConvertTypeToTensorTypeTest, FullyDefinedRankedTensorType) {
mlir::MLIRContext context;
RegisterDialects(context);
mlir::Builder b(&context);
PartialTensorShape output_shape = ConvertTypeToTensorShape(
mlir::RankedTensorType::get({1, 2, 3}, b.getF32Type()));
EXPECT_TRUE(output_shape.IsIdenticalTo(PartialTensorShape({1, 2, 3})));
}
TEST(ConvertTypeToTensorTypeTest, ScalarTensorType) {
mlir::MLIRContext context;
mlir::Builder b(&context);
PartialTensorShape output_shape = ConvertTypeToTensorShape(b.getF32Type());
EXPECT_TRUE(output_shape.IsIdenticalTo(TensorShape()));
}
TEST(ConvertTypeToTensorTypeTest, ConvertStringTensor) {
mlir::MLIRContext context;
RegisterDialects(context);
mlir::Builder b(&context);
Tensor tensor(DT_STRING, TensorShape({1, 2, 2, 1}));
EXPECT_EQ(4, tensor.NumElements());
auto Tt = tensor.flat<tstring>();
Tt.setValues({"one", "two", "three", "four"});
auto value_or_status = ConvertTensor(tensor, &b);
ASSERT_TRUE(value_or_status.ok());
auto attr = value_or_status.value();
EXPECT_TRUE(mlir::isa<mlir::DenseStringElementsAttr>(attr));
auto string_attr = mlir::cast<mlir::DenseStringElementsAttr>(attr);
auto string_values = string_attr.getRawStringData();
ASSERT_EQ(string_values.size(), 4);
EXPECT_EQ(string_values[0], mlir::StringRef("one"));
EXPECT_EQ(string_values[1], mlir::StringRef("two"));
EXPECT_EQ(string_values[2], mlir::StringRef("three"));
EXPECT_EQ(string_values[3], mlir::StringRef("four"));
}
class ConvertTensorTest : public ::testing::Test {
protected:
template <typename T>
void VerifyConversion(std::initializer_list<T> values, DataType dtype,
mlir::Type expected_ty) {
mlir::Builder b(expected_ty.getContext());
Tensor tensor(dtype, TensorShape({static_cast<int64_t>(values.size())}));
tensor.flat<T>().setValues(values);
auto value_or = ConvertTensor(tensor, &b);
TF_ASSERT_OK(value_or.status());
auto attr = value_or.value();
EXPECT_EQ(attr.getShapedType().getElementType(), expected_ty);
Tensor out;
TF_ASSERT_OK(ConvertToTensor(attr, &out));
test::ExpectTensorEqual<T>(tensor, out);
}
};
TEST_F(ConvertTensorTest, Simple) {
mlir::MLIRContext context;
RegisterDialects(context);
ASSERT_NO_FATAL_FAILURE(VerifyConversion<Eigen::half>(
{Eigen::half(1.0)}, DT_HALF, mlir::FloatType::getF16(&context)));
ASSERT_NO_FATAL_FAILURE(
VerifyConversion<bfloat16>({bfloat16(1.0), bfloat16(-1.0)}, DT_BFLOAT16,
mlir::FloatType::getBF16(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<float>(
{1.0, -1.0}, DT_FLOAT, mlir::FloatType::getF32(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<double>(
{1.0, -1.0}, DT_DOUBLE, mlir::FloatType::getF64(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<tsl::float8_e5m2>(
{tsl::float8_e5m2{1.0}, tsl::float8_e5m2{-1.0}}, DT_FLOAT8_E5M2,
mlir::FloatType::getFloat8E5M2(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<tsl::float8_e4m3fn>(
{tsl::float8_e4m3fn{1.0}, tsl::float8_e4m3fn{-1.0}}, DT_FLOAT8_E4M3FN,
mlir::FloatType::getFloat8E4M3FN(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int4>(
{static_cast<int4>(1), static_cast<int4>(-1)}, DT_INT4,
mlir::IntegerType::get(&context, 4,
mlir::IntegerType::SignednessSemantics::Signed)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int8>(
{1, -1}, DT_INT8, mlir::IntegerType::get(&context, 8)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int16>(
{1, -1}, DT_INT16, mlir::IntegerType::get(&context, 16)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int32>(
{1, -1}, DT_INT32, mlir::IntegerType::get(&context, 32)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int64_t>(
{1, -1}, DT_INT64, mlir::IntegerType::get(&context, 64)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint4>(
{static_cast<uint4>(1), static_cast<uint4>(2)}, DT_UINT4,
mlir::IntegerType::get(
&context, 4, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint8>(
{1, 2}, DT_UINT8,
mlir::IntegerType::get(
&context, 8, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint16>(
{1, 2}, DT_UINT16,
mlir::IntegerType::get(
&context, 16, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint32>(
{1, 2}, DT_UINT32,
mlir::IntegerType::get(
&context, 32, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint64>(
{1, 2}, DT_UINT64,
mlir::IntegerType::get(
&context, 64, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<std::complex<float>>(
{{0.0, 1.0}, {1.0, 0.0}}, DT_COMPLEX64,
mlir::ComplexType::get(mlir::FloatType::getF32(&context))));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<std::complex<double>>(
{{0.0, 1.0}, {1.0, 0.0}}, DT_COMPLEX128,
mlir::ComplexType::get(mlir::FloatType::getF64(&context))));
}
bool IsSplat(mlir::ElementsAttr attr) {
return mlir::cast<mlir::DenseElementsAttr>(attr).isSplat();
}
TEST(ConvertTensorProtoTest, SplatTensor) {
TensorProto tensor;
tensor.set_dtype(DT_FLOAT);
tensor.mutable_tensor_shape()->add_dim()->set_size(1ULL << 35);
tensor.add_float_val(42.0);
mlir::MLIRContext context;
mlir::Builder builder(&context);
TF_ASSERT_OK_AND_ASSIGN(mlir::ElementsAttr attribute,
ConvertTensorProto(tensor, &builder));
EXPECT_THAT(
attribute,
AllOf(Eq(mlir::DenseElementsAttr::get(
mlir::RankedTensorType::get({1ULL << 35}, builder.getF32Type()),
42.0f)),
ResultOf(IsSplat, IsTrue())));
}
TEST(ConvertTensorProtoTest, NonSplatTensor) {
TensorProto proto = tensor::CreateTensorProto<float>(
{1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
mlir::MLIRContext context;
mlir::Builder builder(&context);
TF_ASSERT_OK_AND_ASSIGN(mlir::ElementsAttr attribute,
ConvertTensorProto(proto, &builder));
EXPECT_THAT(
attribute,
AllOf(Eq(mlir::DenseElementsAttr::get(
mlir::RankedTensorType::get({2, 2}, builder.getF32Type()),
{1.0f, 2.0f, 3.0f, 4.0f})),
ResultOf(IsSplat, IsFalse())));
}
TEST(ConvertTypeToTensorSpecProtoTest, UnrankedTensorType) {
mlir::MLIRContext context;
mlir::Builder b(&context);
auto output_proto = ConvertTypeToTensorSpecProto(
mlir::UnrankedTensorType::get(b.getF32Type()));
TF_ASSERT_OK(output_proto.status());
EXPECT_EQ(output_proto->dtype(), DT_FLOAT);
EXPECT_TRUE(output_proto->shape().unknown_rank());
}
TEST(ConvertTypeToTensorSpecProtoTest, RankedTensorType) {
mlir::MLIRContext context;
mlir::Builder b(&context);
auto output_proto = ConvertTypeToTensorSpecProto(
mlir::RankedTensorType::get({1, 2, 3}, b.getF32Type()));
TF_ASSERT_OK(output_proto.status());
EXPECT_EQ(output_proto->dtype(), DT_FLOAT);
EXPECT_EQ(output_proto->shape().dim_size(), 3);
EXPECT_EQ(output_proto->shape().dim().at(0).size(), 1);
EXPECT_EQ(output_proto->shape().dim().at(1).size(), 2);
EXPECT_EQ(output_proto->shape().dim().at(2).size(), 3);
}
TEST(ConvertTypeToTensorSpecProtoTest, ScalarTensorType) {
mlir::MLIRContext context;
mlir::Builder b(&context);
auto output_proto = ConvertTypeToTensorSpecProto(b.getF32Type());
TF_ASSERT_OK(output_proto.status());
EXPECT_EQ(output_proto->dtype(), DT_FLOAT);
EXPECT_FALSE(output_proto->shape().unknown_rank());
EXPECT_EQ(output_proto->shape().dim_size(), 0);
}
}
} |
1,233 | cpp | tensorflow/tensorflow | tpu_rewrite_device_util | tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.cc | tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_TPU_REWRITE_DEVICE_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_TPU_REWRITE_DEVICE_UTIL_H_
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Operation.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_structs.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
using tsl::StatusOr;
inline constexpr absl::string_view kNumCoresPerReplicaAttr =
"num_cores_per_replica";
inline constexpr absl::string_view kTopologyAttr = "topology";
inline constexpr absl::string_view kDeviceAssignmentAttr = "device_assignment";
struct TPUDeviceAndHost {
TPUDeviceAndHost() = default;
TPUDeviceAndHost(llvm::StringRef device, llvm::StringRef host)
: device(device), host(host) {}
std::string device;
std::string host;
};
using TPUDevicesAndHosts =
llvm::SmallVector<llvm::SmallVector<TPUDeviceAndHost, 8>, 8>;
struct TPUDeviceAssignment {
TPUDeviceAssignment(llvm::StringRef compilation_device,
TPUDevicesAndHosts&& tpu_devices)
: compilation_device(compilation_device),
tpu_devices(std::move(tpu_devices)) {}
TPUDeviceAssignment(llvm::StringRef compilation_device,
TPUDevicesAndHosts&& tpu_devices,
xla::DeviceAssignmentProto&& xla_device_assignment)
: compilation_device(compilation_device),
tpu_devices(std::move(tpu_devices)),
xla_device_assignment(std::move(xla_device_assignment)) {}
std::string compilation_device;
TPUDevicesAndHosts tpu_devices;
std::optional<xla::DeviceAssignmentProto> xla_device_assignment;
};
absl::StatusOr<llvm::SmallVector<int64_t, 8>> GetDeviceCoordinates(
mlir::ArrayAttr device_assignment_attr);
absl::StatusOr<TPUDeviceAssignment> GetTPUCompilationAndExecutionDevices(
llvm::ArrayRef<DeviceNameUtils::ParsedName> devices, int num_replicas,
int num_cores_per_replica, llvm::StringRef topology_attr,
llvm::ArrayRef<int64_t> device_assignment_attr);
std::string GetDeviceAliasForLogicalCore(int core_index);
std::string GetDeviceAliasForHostOfLogicalCore(int core_index);
bool HasModelParallelism(mlir::tf_device::ClusterOp cluster);
bool HasTPUDevice(const mlir::TF::RuntimeDevices& devices);
mlir::LogicalResult GetHostDeviceOutsideCompilationInGenericPipeline(
mlir::TF::RuntimeDevices devices, std::string* host_device);
mlir::LogicalResult GetHostDeviceOutsideComputation(
mlir::TF::RuntimeDevices devices, mlir::tf_device::ClusterOp cluster,
std::string* host_device);
bool IsTPUDevice(llvm::StringRef device);
bool IsTPUReplicatedCore(llvm::StringRef device);
bool TypeValidForXLA(const mlir::Type& type);
mlir::LogicalResult GetDeviceToHostMap(
mlir::tf_device::ClusterOp cluster,
llvm::SmallVector<std::string, 8>& core_to_host);
mlir::LogicalResult GetNonReplicatedTPU0(mlir::Operation* op,
std::string* tpu0_device);
mlir::LogicalResult GetNonReplicatedCPU0(mlir::Operation* op,
std::string* cpu0_device);
}
#endif
#include "tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_structs.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h"
#include "tensorflow/compiler/mlir/utils/string_container_utils.h"
#include "xla/array4d.h"
#include "xla/service/computation_placer.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
constexpr int kTPUTopologyRank = 4;
constexpr char kDeviceTPUSystem[] = "TPU_SYSTEM";
constexpr char kDeviceTPU[] = "TPU";
constexpr char kTPUReplicatedCore[] = "TPU_REPLICATED_CORE";
constexpr char kTPUReplicatedHost[] = "TPU_REPLICATED_HOST";
constexpr char kBadIntArrayElementMsg[] =
"bad '{0}' attribute at index {1}, not an int";
using ParsedDevice = DeviceNameUtils::ParsedName;
using ParsedDevices = llvm::ArrayRef<DeviceNameUtils::ParsedName>;
namespace {
llvm::SmallVector<ParsedDevice, 8> FindMatchingDevices(
ParsedDevices devices, const ParsedDevice& spec) {
llvm::SmallVector<ParsedDevice, 8> matching_devices;
for (const auto& device : devices)
if (DeviceNameUtils::IsCompleteSpecification(spec, device))
matching_devices.push_back(device);
return matching_devices;
}
template <typename T>
absl::Status MismatchedTPUSystemAttributeErr(absl::string_view attribute, T a,
T b) {
return absl::InvalidArgumentError(
absl::StrCat("found ", kDeviceTPUSystem, " devices with conflicting ",
attribute, "s '", a, "' and '", b, "'"));
}
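// Collects all TPU_SYSTEM:0 devices, verifies that they agree on job and
// replica, and returns them sorted by task id.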
absl::StatusOr<llvm::SmallVector<ParsedDevice, 8>> GetTPUSystemDevices(
ParsedDevices devices) {
ParsedDevice spec;
spec.type = kDeviceTPUSystem;
spec.has_type = true;
spec.id = 0;
spec.has_id = true;
llvm::SmallVector<ParsedDevice, 8> system_devices =
FindMatchingDevices(devices, spec);
if (system_devices.empty())
return absl::InvalidArgumentError(
absl::StrCat("no ", kDeviceTPUSystem, " devices found"));
const auto& job = system_devices[0].job;
auto replica = system_devices[0].replica;
for (const auto& device : llvm::make_range(std::next(system_devices.begin()),
system_devices.end())) {
if (device.job != job)
return MismatchedTPUSystemAttributeErr("job", job, device.job);
if (device.replica != replica)
return MismatchedTPUSystemAttributeErr("replica", replica,
device.replica);
}
std::sort(system_devices.begin(), system_devices.end(),
[](const ParsedDevice& a, const ParsedDevice& b) {
return a.task < b.task;
});
return system_devices;
}
absl::StatusOr<llvm::SmallVector<llvm::SmallVector<ParsedDevice, 8>, 8>>
GetTPUDevices(ParsedDevices devices,
llvm::ArrayRef<ParsedDevice> system_devices) {
llvm::SmallVector<llvm::SmallVector<ParsedDevice, 8>, 8> tpu_devices;
tpu_devices.reserve(system_devices.size());
auto lookup = [&devices](ParsedDevice device_spec) {
device_spec.has_type = true;
device_spec.type = kDeviceTPU;
device_spec.has_id = false;
llvm::SmallVector<ParsedDevice, 8> host_tpu_devices =
FindMatchingDevices(devices, device_spec);
std::sort(host_tpu_devices.begin(), host_tpu_devices.end(),
[](const ParsedDevice& i, const ParsedDevice& j) {
return i.id < j.id;
});
return host_tpu_devices;
};
int num_tpus_per_host = 0;
{
const auto& device = system_devices[0];
auto host_tpu_devices = lookup(device);
num_tpus_per_host = host_tpu_devices.size();
tpu_devices.push_back(std::move(host_tpu_devices));
}
for (const auto& device_spec : llvm::make_range(
std::next(system_devices.begin()), system_devices.end())) {
auto host_tpu_devices = lookup(device_spec);
const int64_t host_tpu_devices_size = host_tpu_devices.size();
if (num_tpus_per_host != host_tpu_devices_size)
return absl::InvalidArgumentError(
absl::StrCat("expected the number of TPU devices per host to be ",
num_tpus_per_host, ", got ", host_tpu_devices.size()));
tpu_devices.push_back(std::move(host_tpu_devices));
}
return tpu_devices;
}
std::string GetTPUCompilationDevice(ParsedDevice system_device) {
system_device.type = tensorflow::DEVICE_CPU;
return DeviceNameUtils::ParsedNameToString(system_device);
}
absl::StatusOr<std::string> GetCPUHostDeviceForTPUDevice(
ParsedDevice tpu_device, ParsedDevices devices) {
tpu_device.type = DEVICE_CPU;
bool enable_multiple_local_cpu_devices =
tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_multiple_local_cpu_devices;
if (!enable_multiple_local_cpu_devices) {
tpu_device.id = 0;
}
if (FindMatchingDevices(devices, tpu_device).empty()) {
return absl::InvalidArgumentError(absl::StrCat(
"Can't find device: ", DeviceNameUtils::ParsedNameToString(tpu_device),
" in the devices list."));
}
return DeviceNameUtils::ParsedNameToString(tpu_device);
}
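// Full-mesh case: replica i is assigned TPU i in task-major order, and exactly
// one core per replica is required.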
absl::StatusOr<TPUDevicesAndHosts> GetFullMeshTPUExecutionDeviceAssignment(
int num_replicas, int num_cores_per_replica,
llvm::ArrayRef<llvm::SmallVector<ParsedDevice, 8>> tpu_devices,
ParsedDevices devices) {
const int num_tasks = tpu_devices.size();
const int num_tpus_per_task = tpu_devices[0].size();
const int num_tpu_devices = num_tasks * num_tpus_per_task;
if (num_replicas != 1 && num_replicas != num_tpu_devices)
return absl::InvalidArgumentError(
absl::StrCat("'num_replicas' must be equal to 1 or ", num_tpu_devices,
", got ", num_replicas));
if (num_cores_per_replica != 1)
return absl::InvalidArgumentError(
absl::StrCat("'num_cores_per_replica' must be equal to 1, got ",
num_cores_per_replica));
TPUDevicesAndHosts devices_and_hosts;
devices_and_hosts.reserve(num_replicas);
for (int i = 0; i < num_replicas; ++i) {
const int task = i / num_tpus_per_task;
const int device = i % num_tpus_per_task;
const auto& tpu_device = tpu_devices[task][device];
devices_and_hosts.push_back({TPUDeviceAndHost(
tensorflow::DeviceNameUtils::ParsedNameToString(tpu_device),
*GetCPUHostDeviceForTPUDevice(tpu_device, devices))});
}
return devices_and_hosts;
}
struct TaskAndDevice {
TaskAndDevice() = default;
TaskAndDevice(int task, int device) : task(task), device(device) {}
int task = -1;
int device = -1;
};
bool DeviceCoordinateOutOfBound(int x, int y, int z, int core, int bound_x,
int bound_y, int bound_z, int bound_core) {
return x < 0 || x >= bound_x || y < 0 || y >= bound_y || z < 0 ||
z >= bound_z || core < 0 || core >= bound_core;
}
absl::Status DeviceCoordinateErrorMsg(absl::string_view attribute, int x, int y,
int z, int core, int bound_x, int bound_y,
int bound_z, int bound_core) {
return absl::InvalidArgumentError(
absl::StrCat("device coordinate (", x, ", ", y, ", ", z, ", ", core,
") in '", attribute, "' is outside of mesh shape (", bound_x,
", ", bound_y, ", ", bound_z, ", ", bound_core, ")"));
}
absl::Status DuplicateCoordinateErrorMsg(absl::string_view attribute, int x,
int y, int z, int core) {
return absl::InvalidArgumentError(
absl::StrCat("'", attribute, "' has duplicate device coordinate (", x,
", ", y, ", ", z, ", ", core, ")"));
}
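// Parses the serialized TopologyProto into a 4-D (x, y, z, core) grid mapping
// mesh coordinates to (task, device), validating bounds and duplicates.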
absl::StatusOr<xla::Array4D<TaskAndDevice>> ParseTopologyAttr(
llvm::StringRef topology_attr, int num_tasks, int num_tpus_per_task) {
tpu::TopologyProto topology_proto;
if (!topology_proto.ParseFromString(topology_attr.str()))
return absl::InvalidArgumentError(absl::StrCat(
"failed to parse '", kTopologyAttr, "' attribute to TopologyProto"));
if (topology_proto.mesh_shape_size() != kTPUTopologyRank)
return absl::InvalidArgumentError(absl::StrCat(
"'", kTopologyAttr, "' 'mesh_shape' must be rank ", kTPUTopologyRank,
", got rank ", topology_proto.mesh_shape_size()));
for (auto mesh_shape_dim : llvm::enumerate(topology_proto.mesh_shape()))
if (mesh_shape_dim.value() <= 0)
return absl::InvalidArgumentError(
absl::StrCat("'", kTopologyAttr, "' 'mesh_shape' dimension ",
mesh_shape_dim.index(), " must be positive, got ",
mesh_shape_dim.value()));
if (topology_proto.num_tasks() != num_tasks)
return absl::InvalidArgumentError(absl::StrCat(
"number of tasks from available TPU devices must be 'num_tasks' in '",
kTopologyAttr, "' (", topology_proto.num_tasks(), "), got ",
num_tasks));
if (topology_proto.num_tpu_devices_per_task() != num_tpus_per_task)
return absl::InvalidArgumentError(absl::StrCat(
"number of TPU devices available per task must be "
"'num_tpu_devices_per_task' in '",
kTopologyAttr, "' (", topology_proto.num_tpu_devices_per_task(),
"), got ", num_tpus_per_task));
const int expected_device_coordinates_size =
num_tasks * num_tpus_per_task * kTPUTopologyRank;
if (topology_proto.device_coordinates_size() !=
expected_device_coordinates_size)
return absl::InvalidArgumentError(absl::StrCat(
"length of 'device_coordinates' in '", kTopologyAttr,
"' must be 'num_tasks' * 'num_tpus_per_task' * ", kTPUTopologyRank,
" (", num_tasks, " * ", num_tpus_per_task, " * ", kTPUTopologyRank,
"), got ", topology_proto.device_coordinates_size()));
const int bound_x = topology_proto.mesh_shape(0);
const int bound_y = topology_proto.mesh_shape(1);
const int bound_z = topology_proto.mesh_shape(2);
const int bound_core = topology_proto.mesh_shape(3);
xla::Array4D<TaskAndDevice> topology(bound_x, bound_y, bound_z, bound_core);
int pos = 0;
for (int task = 0; task < num_tasks; ++task) {
for (int device = 0; device < num_tpus_per_task; ++device) {
int x = topology_proto.device_coordinates(pos++);
int y = topology_proto.device_coordinates(pos++);
int z = topology_proto.device_coordinates(pos++);
int core = topology_proto.device_coordinates(pos++);
if (DeviceCoordinateOutOfBound(x, y, z, core, bound_x, bound_y, bound_z,
bound_core))
return DeviceCoordinateErrorMsg(kTopologyAttr, x, y, z, core, bound_x,
bound_y, bound_z, bound_core);
auto& task_and_device = topology(x, y, z, core);
if (task_and_device.task != -1)
return DuplicateCoordinateErrorMsg(kTopologyAttr, x, y, z, core);
task_and_device = {task, device};
}
}
return topology;
}
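// General case: each (x, y, z, core) quadruple in the flattened
// device_assignment attribute is looked up in the topology grid to find the
// concrete TPU device per (replica, logical core), and the matching
// xla::DeviceAssignment proto is produced alongside.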
absl::StatusOr<std::pair<TPUDevicesAndHosts, xla::DeviceAssignmentProto>>
GetGeneralTPUExecutionDeviceAssignment(
int num_replicas, int num_cores_per_replica,
llvm::ArrayRef<llvm::SmallVector<ParsedDevice, 8>> tpu_devices,
ParsedDevices devices, llvm::StringRef topology_attr,
llvm::ArrayRef<int64_t> device_assignment_attr) {
const int num_tasks = tpu_devices.size();
const int num_tpus_per_task = tpu_devices[0].size();
TF_ASSIGN_OR_RETURN(auto topology, ParseTopologyAttr(topology_attr, num_tasks,
num_tpus_per_task));
const int expected_device_assignment_size =
num_replicas * num_cores_per_replica * kTPUTopologyRank;
const int device_assignment_attr_size = device_assignment_attr.size();
if (device_assignment_attr_size != expected_device_assignment_size)
return absl::InvalidArgumentError(absl::StrCat(
"length of '", kDeviceAssignmentAttr,
"' must be 'num_replicas' * 'num_cores_per_replica' * ",
kTPUTopologyRank, " (", num_replicas, " * ", num_cores_per_replica,
" * ", kTPUTopologyRank, "), got ", device_assignment_attr.size()));
const int bound_x = topology.n1();
const int bound_y = topology.n2();
const int bound_z = topology.n3();
const int bound_core = topology.n4();
auto location_to_id = [&](int x, int y, int z, int core) {
return (x + bound_x * (y + bound_y * z)) * bound_core + core;
};
std::vector<bool> used_device_ids(bound_x * bound_y * bound_z * bound_core,
false);
TPUDevicesAndHosts devices_and_hosts(
num_replicas, llvm::SmallVector<TPUDeviceAndHost, 8>(
num_cores_per_replica, TPUDeviceAndHost()));
xla::DeviceAssignment device_assignment(num_replicas, num_cores_per_replica);
int pos = 0;
for (int replica = 0; replica < num_replicas; ++replica) {
for (int logical_core = 0; logical_core < num_cores_per_replica;
++logical_core) {
int x = device_assignment_attr[pos++];
int y = device_assignment_attr[pos++];
int z = device_assignment_attr[pos++];
int core = device_assignment_attr[pos++];
if (DeviceCoordinateOutOfBound(x, y, z, core, bound_x, bound_y, bound_z,
bound_core))
return DeviceCoordinateErrorMsg(kDeviceAssignmentAttr, x, y, z, core,
bound_x, bound_y, bound_z, bound_core);
TaskAndDevice task_and_device = topology(x, y, z, core);
const int task = task_and_device.task;
const int device = task_and_device.device;
if (task == -1 || device == -1)
return absl::InvalidArgumentError(absl::StrCat(
"no TPU device found for '", kDeviceAssignmentAttr,
"' device coordinate (", x, ", ", y, ", ", z, ", ", core, ")"));
const int device_id = location_to_id(x, y, z, core);
if (used_device_ids[device_id])
return DuplicateCoordinateErrorMsg(kDeviceAssignmentAttr, x, y, z,
core);
used_device_ids[device_id] = true;
device_assignment(replica, logical_core) = device_id;
auto& device_and_host = devices_and_hosts[replica][logical_core];
const auto& tpu_device = tpu_devices[task][device];
device_and_host.device = DeviceNameUtils::ParsedNameToString(tpu_device);
device_and_host.host = *GetCPUHostDeviceForTPUDevice(tpu_device, devices);
}
}
xla::DeviceAssignmentProto device_assignment_proto;
device_assignment.Serialize(&device_assignment_proto);
return std::pair<TPUDevicesAndHosts, xla::DeviceAssignmentProto>(
std::move(devices_and_hosts), std::move(device_assignment_proto));
}
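// Fetches the required 'topology' string attribute from a tf_device.cluster,
// emitting an op error if it is absent.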
mlir::LogicalResult GetTopology(mlir::tf_device::ClusterOp cluster,
std::string& topology) {
mlir::StringAttr topology_attr =
cluster->getAttrOfType<mlir::StringAttr>(tensorflow::kTopologyAttr);
if (topology_attr) {
topology = topology_attr.getValue();
return mlir::success();
} else {
return cluster.emitOpError(
llvm::formatv("requires attribute '{0}'", tensorflow::kTopologyAttr)
.str());
}
}
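// Fetches the required 'device_assignment' array attribute from a
// tf_device.cluster and converts it into int64_t coordinates.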
mlir::LogicalResult GetDeviceAssignmentCoordinates(
mlir::tf_device::ClusterOp cluster,
llvm::SmallVector<int64_t, 8>& device_coordinates) {
mlir::ArrayAttr device_assignment_attr =
cluster->getAttrOfType<mlir::ArrayAttr>(
tensorflow::kDeviceAssignmentAttr);
if (!device_assignment_attr)
return cluster.emitOpError(llvm::formatv("requires attribute '{0}'",
tensorflow::kDeviceAssignmentAttr)
.str());
if (absl::StatusOr<llvm::SmallVector<int64_t, 8>> fetched_device_coordinates =
tensorflow::GetDeviceCoordinates(device_assignment_attr);
fetched_device_coordinates.ok()) {
device_coordinates = * | #include "tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.h"
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
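// Parses a textual MLIR module, registering all TensorFlow dialects on the
// context first.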
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> GetMlirModuleFromString(
llvm::StringRef string, mlir::MLIRContext* context) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
auto status =
tensorflow::DeserializeMlirModule(string, context, &mlir_module);
if (!status.ok()) {
return status;
}
return mlir_module;
}
using Device = DeviceNameUtils::ParsedName;
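// Converts fully qualified device name strings into ParsedName structs;
// returns false if any name fails to parse.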
bool DeviceNamesToParsedNames(llvm::ArrayRef<std::string> device_names,
llvm::SmallVectorImpl<Device>* parsed_devices) {
parsed_devices->reserve(device_names.size());
for (const auto& device_name : device_names) {
Device parsed_name;
if (!DeviceNameUtils::ParseFullName(device_name, &parsed_name))
return false;
parsed_devices->push_back(parsed_name);
}
return true;
}
using DeviceNames = llvm::SmallVector<std::string, 8>;
struct ParameterizedDeviceSetTest
: ::testing::TestWithParam<std::tuple<DeviceNames, std::string>> {};
TEST_P(ParameterizedDeviceSetTest, BadDeviceSet) {
llvm::SmallVector<Device, 8> devices;
ASSERT_TRUE(DeviceNamesToParsedNames(std::get<0>(GetParam()), &devices));
std::string topology_attr;
std::vector<int64_t> device_assignment_attr;
  auto status_or = GetTPUCompilationAndExecutionDevices(
      devices, 1, 1, topology_attr, device_assignment_attr);
ASSERT_FALSE(status_or.ok());
EXPECT_EQ(status_or.status().message(), std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
BadDeviceSet, ParameterizedDeviceSetTest,
::testing::Values(
std::make_tuple<DeviceNames, std::string>(
{"/job:localhost/replica:0/task:0/device:CPU:0"},
"no TPU_SYSTEM devices found"),
std::make_tuple<DeviceNames, std::string>(
{"/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:worker/replica:0/task:0/device:TPU_SYSTEM:0"},
"found TPU_SYSTEM devices with conflicting jobs 'localhost' and "
"'worker'"),
std::make_tuple<DeviceNames, std::string>(
{"/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:localhost/replica:1/task:0/device:TPU_SYSTEM:0"},
"found TPU_SYSTEM devices with conflicting replicas '0' and '1'"),
std::make_tuple<DeviceNames, std::string>(
{"/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:localhost/replica:0/task:0/device:TPU:0",
"/job:localhost/replica:0/task:0/device:TPU:1",
"/job:localhost/replica:0/task:1/device:TPU_SYSTEM:0",
"/job:localhost/replica:0/task:1/device:TPU:0"},
"expected the number of TPU devices per host to be 2, got 1")));
struct ParameterizedMetadataTest
: ::testing::TestWithParam<std::tuple<int, int, std::string,
std::vector<int64_t>, std::string>> {
};
TEST_P(ParameterizedMetadataTest, BadMetadata) {
llvm::SmallVector<Device, 8> devices;
ASSERT_TRUE(DeviceNamesToParsedNames(
{"/job:worker/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:worker/replica:0/task:0/device:TPU:0",
"/job:worker/replica:0/task:0/device:CPU:0",
"/job:worker/replica:0/task:1/device:TPU_SYSTEM:0",
"/job:worker/replica:0/task:1/device:TPU:0",
"/job:worker/replica:0/task:1/device:CPU:0"},
&devices));
std::string compilation_device;
llvm::SmallVector<llvm::SmallVector<std::string, 8>, 8> execution_devices;
std::optional<xla::DeviceAssignmentProto> xla_device_assignment;
auto status_or = GetTPUCompilationAndExecutionDevices(
devices, std::get<0>(GetParam()), std::get<1>(GetParam()),
std::get<2>(GetParam()), std::get<3>(GetParam()));
ASSERT_FALSE(status_or.ok());
EXPECT_EQ(status_or.status().message(), std::get<4>(GetParam()));
}
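// The helpers below serialize TopologyProto messages with specific fields
// populated, to exercise individual metadata validation error paths.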
std::string TopologyWithMeshShape(llvm::ArrayRef<int> mesh_shape) {
tpu::TopologyProto topology_proto;
for (int mesh_dim : mesh_shape) topology_proto.add_mesh_shape(mesh_dim);
return topology_proto.SerializeAsString();
}
std::string TopologyWithMeshShapeAndTasks(llvm::ArrayRef<int> mesh_shape,
int num_tasks,
int num_tpu_devices_per_task) {
tpu::TopologyProto topology_proto;
for (int mesh_dim : mesh_shape) topology_proto.add_mesh_shape(mesh_dim);
topology_proto.set_num_tasks(num_tasks);
topology_proto.set_num_tpu_devices_per_task(num_tpu_devices_per_task);
return topology_proto.SerializeAsString();
}
std::string TopologyWithDeviceCoordinates(
llvm::ArrayRef<int> device_coordinates) {
tpu::TopologyProto topology_proto;
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(1);
topology_proto.set_num_tasks(2);
topology_proto.set_num_tpu_devices_per_task(1);
for (int device_coordinate : device_coordinates)
topology_proto.add_device_coordinates(device_coordinate);
return topology_proto.SerializeAsString();
}
INSTANTIATE_TEST_SUITE_P(
BadFullMeshMetadata, ParameterizedMetadataTest,
::testing::Values(
std::make_tuple(
2, 1, "", std::vector<int64_t>{0},
"'device_assignment' must not be set when 'topology' is not set"),
std::make_tuple(8, 1, "", std::vector<int64_t>(),
"'num_replicas' must be equal to 1 or 2, got 8"),
std::make_tuple(2, 2, "", std::vector<int64_t>(),
"'num_cores_per_replica' must be equal to 1, got 2")));
INSTANTIATE_TEST_SUITE_P(
BadGeneralTopologyMetadata, ParameterizedMetadataTest,
::testing::Values(
std::make_tuple(
2, 1, "BAD_TOPOLOGY", std::vector<int64_t>(),
"failed to parse 'topology' attribute to TopologyProto"),
std::make_tuple(4, 2, TopologyWithMeshShape({0}),
std::vector<int64_t>(),
"'topology' 'mesh_shape' must be rank 4, got rank 1"),
std::make_tuple(
2, 1, TopologyWithMeshShape({2, 0, 1, 2}), std::vector<int64_t>(),
"'topology' 'mesh_shape' dimension 1 must be positive, got 0"),
std::make_tuple(2, 1, TopologyWithMeshShapeAndTasks({1, 1, 1, 1}, 1, 1),
std::vector<int64_t>(),
"number of tasks from available TPU devices must be "
"'num_tasks' in 'topology' (1), got 2"),
std::make_tuple(2, 1, TopologyWithMeshShapeAndTasks({1, 1, 1, 1}, 2, 2),
std::vector<int64_t>(),
"number of TPU devices available per task must be "
"'num_tpu_devices_per_task' in 'topology' (2), got 1"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({}), std::vector<int64_t>(),
"length of 'device_coordinates' in 'topology' must be 'num_tasks' "
"* 'num_tpus_per_task' * 4 (2 * 1 * 4), got 0"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({-1, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (-1, 0, 0, 0) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({2, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (2, 0, 0, 0) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, -1, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (0, -1, 0, 0) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 1, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (0, 1, 0, 0) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, -1, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (0, 0, 0, -1) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 1, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (0, 0, 0, 1) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 0, 0, 0, 0}),
std::vector<int64_t>(),
"'topology' has duplicate device coordinate (0, 0, 0, 0)")));
INSTANTIATE_TEST_SUITE_P(
BadGeneralDeviceAssignmentMetadata, ParameterizedMetadataTest,
::testing::Values(
std::make_tuple(2, 1,
TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"length of 'device_assignment' must be 'num_replicas' "
"* 'num_cores_per_replica' * 4 (2 * 1 * 4), got 0"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{-1, 0, 0, 0, 0, 0, 0, 0},
"device coordinate (-1, 0, 0, 0) in 'device_assignment' "
"is outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{2, 0, 0, 0, 0, 0, 0, 0},
"device coordinate (2, 0, 0, 0) in 'device_assignment' is "
"outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, -1, 0, 0, 0, 0, 0, 0},
"device coordinate (0, -1, 0, 0) in 'device_assignment' "
"is outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, 1, 0, 0, 0, 0, 0, 0},
"device coordinate (0, 1, 0, 0) in 'device_assignment' is "
"outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, 0, 0, -1, 0, 0, 0, 0},
"device coordinate (0, 0, 0, -1) in 'device_assignment' "
"is outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, 0, 0, 1, 0, 0, 0, 0},
"device coordinate (0, 0, 0, 1) in 'device_assignment' is "
"outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(2, 1,
TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, 0, 0, 0, 0, 0, 0, 0},
"'device_assignment' has duplicate device coordinate "
"(0, 0, 0, 0)")));
std::vector<std::string> MakeDeviceSet(int num_tasks,
int num_devices_per_task) {
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0"};
  devices.reserve(num_tasks * (num_devices_per_task + 2) + 1);
for (int task = 0; task < num_tasks; ++task) {
devices.push_back(
llvm::formatv("/job:worker/replica:0/task:{0}/device:CPU:0", task)
.str());
devices.push_back(
llvm::formatv("/job:worker/replica:0/task:{0}/device:TPU_SYSTEM:0",
task)
.str());
for (int device = 0; device < num_devices_per_task; ++device)
devices.push_back(
llvm::formatv("/job:worker/replica:0/task:{0}/device:TPU:{1}", task,
device)
.str());
}
return devices;
}
TEST(TPURewriteDeviceUtilTest,
BadGeneralDeviceAssignmentMetadataMissingDevice) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(1);
topology_proto.set_num_tasks(1);
topology_proto.set_num_tpu_devices_per_task(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
}
std::string topology_attr = topology_proto.SerializeAsString();
std::vector<int64_t> device_assignment_attr{1, 0, 0, 0};
llvm::SmallVector<Device, 8> devices;
  std::vector<std::string> device_names = MakeDeviceSet(1, 1);
  ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
  auto status_or = GetTPUCompilationAndExecutionDevices(
      devices, 1, 1, topology_attr, device_assignment_attr);
ASSERT_FALSE(status_or.ok());
EXPECT_EQ(status_or.status().message(),
"no TPU device found for 'device_assignment' device coordinate (1, "
"0, 0, 0)");
}
TEST(TPURewriteDeviceUtilTest, ValidFullMeshDeviceAssignment) {
llvm::SmallVector<Device, 8> devices;
  std::vector<std::string> device_names = MakeDeviceSet(2, 4);
  ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
  std::string topology_attr;
  std::vector<int64_t> device_assignment_attr;
  auto status_or = GetTPUCompilationAndExecutionDevices(
      devices, 8, 1, topology_attr, device_assignment_attr);
TF_ASSERT_OK(status_or.status());
const auto& tpu_device_assignment = status_or.value();
EXPECT_EQ(tpu_device_assignment.compilation_device,
"/job:worker/replica:0/task:0/device:CPU:0");
const auto& tpu_devices = tpu_device_assignment.tpu_devices;
ASSERT_EQ(tpu_devices.size(), 8);
for (const auto& replica_tpu_devices : tpu_devices)
ASSERT_EQ(replica_tpu_devices.size(), 1);
EXPECT_EQ(tpu_devices[0][0].device,
"/job:worker/replica:0/task:0/device:TPU:0");
EXPECT_EQ(tpu_devices[0][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[1][0].device,
"/job:worker/replica:0/task:0/device:TPU:1");
EXPECT_EQ(tpu_devices[1][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[2][0].device,
"/job:worker/replica:0/task:0/device:TPU:2");
EXPECT_EQ(tpu_devices[2][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[3][0].device,
"/job:worker/replica:0/task:0/device:TPU:3");
EXPECT_EQ(tpu_devices[3][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[4][0].device,
"/job:worker/replica:0/task:1/device:TPU:0");
EXPECT_EQ(tpu_devices[4][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[5][0].device,
"/job:worker/replica:0/task:1/device:TPU:1");
EXPECT_EQ(tpu_devices[5][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[6][0].device,
"/job:worker/replica:0/task:1/device:TPU:2");
EXPECT_EQ(tpu_devices[6][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[7][0].device,
"/job:worker/replica:0/task:1/device:TPU:3");
EXPECT_EQ(tpu_devices[7][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_FALSE(tpu_device_assignment.xla_device_assignment.has_value());
}
TEST(TPURewriteDeviceUtilTest, ValidGeneralDeviceAssignmentMesh2x2x2) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(2);
topology_proto.set_num_tasks(2);
topology_proto.set_num_tpu_devices_per_task(4);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
}
std::string topology_attr = topology_proto.SerializeAsString();
std::vector<int64_t> device_assignment_attr{0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,
0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0,
0, 1, 1, 1, 0, 0, 1, 1, 0, 1};
llvm::SmallVector<Device, 8> devices;
  std::vector<std::string> device_names = MakeDeviceSet(2, 4);
  ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
  auto status_or = GetTPUCompilationAndExecutionDevices(
      devices, 4, 2, topology_attr, device_assignment_attr);
TF_ASSERT_OK(status_or.status());
const auto& tpu_device_assignment = status_or.value();
EXPECT_EQ(tpu_device_assignment.compilation_device,
"/job:worker/replica:0/task:0/device:CPU:0");
const auto& tpu_devices = tpu_device_assignment.tpu_devices;
ASSERT_EQ(tpu_devices.size(), 4);
for (const auto& replica_tpu_devices : tpu_devices)
ASSERT_EQ(replica_tpu_devices.size(), 2);
EXPECT_EQ(tpu_devices[0][0].device,
"/job:worker/replica:0/task:0/device:TPU:0");
EXPECT_EQ(tpu_devices[0][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[0][1].device,
"/job:worker/replica:0/task:1/device:TPU:3");
EXPECT_EQ(tpu_devices[0][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[1][0].device,
"/job:worker/replica:0/task:0/device:TPU:1");
EXPECT_EQ(tpu_devices[1][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[1][1].device,
"/job:worker/replica:0/task:1/device:TPU:2");
EXPECT_EQ(tpu_devices[1][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[2][0].device,
"/job:worker/replica:0/task:0/device:TPU:3");
EXPECT_EQ(tpu_devices[2][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[2][1].device,
"/job:worker/replica:0/task:1/device:TPU:0");
EXPECT_EQ(tpu_devices[2][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[3][0].device,
"/job:worker/replica:0/task:0/device:TPU:2");
EXPECT_EQ(tpu_devices[3][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[3][1].device,
"/job:worker/replica:0/task:1/device:TPU:1");
EXPECT_EQ(tpu_devices[3][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
auto& xla_device_assignment = tpu_device_assignment.xla_device_assignment;
ASSERT_TRUE(xla_device_assignment.has_value());
EXPECT_EQ(xla_device_assignment->replica_count(), 4);
EXPECT_EQ(xla_device_assignment->computation_count(), 2);
ASSERT_EQ(xla_device_assignment->computation_devices_size(), 2);
const auto& computation_device_0 =
xla_device_assignment->computation_devices(0);
ASSERT_EQ(computation_device_0.replica_device_ids_size(), 4);
const auto& computation_device_1 =
xla_device_assignment->computation_devices(1);
ASSERT_EQ(computation_device_1.replica_device_ids_size(), 4);
EXPECT_EQ(computation_device_0.replica_device_ids(0), 0);
EXPECT_EQ(computation_device_0.replica_device_ids(1), 4);
EXPECT_EQ(computation_device_0.replica_device_ids(2), 2);
EXPECT_EQ(computation_device_0.replica_device_ids(3), 6);
EXPECT_EQ(computation_device_1.replica_device_ids(0), 1);
EXPECT_EQ(computation_device_1.replica_device_ids(1), 5);
EXPECT_EQ(computation_device_1.replica_device_ids(2), 3);
EXPECT_EQ(computation_device_1.replica_device_ids(3), 7);
}
TEST(TPURewriteDeviceUtilTest, ValidGeneralDeviceAssignmentMesh1x2x1x3) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(3);
topology_proto.set_num_tasks(3);
topology_proto.set_num_tpu_devices_per_task(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
}
std::string topology_attr = topology_proto.SerializeAsString();
std::vector<int64_t> device_assignment_attr{
0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 2, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0};
llvm::SmallVector<Device, 8> devices;
  std::vector<std::string> device_names = MakeDeviceSet(3, 2);
  ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
  auto status_or = GetTPUCompilationAndExecutionDevices(
      devices, 2, 3, topology_attr, device_assignment_attr);
TF_ASSERT_OK(status_or.status());
auto& tpu_device_assignment = status_or.value();
EXPECT_EQ(tpu_device_assignment.compilation_device,
"/job:worker/replica:0/task:0/device:CPU:0");
auto& tpu_devices = tpu_device_assignment.tpu_devices;
ASSERT_EQ(tpu_devices.size(), 2);
for (const auto& replica_tpu_devices : tpu_devices)
ASSERT_EQ(replica_tpu_devices.size(), 3);
EXPECT_EQ(tpu_devices[0][0].device,
"/job:worker/replica:0/task:1/device:TPU:1");
EXPECT_EQ(tpu_devices[0][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[0][1].device,
"/job:worker/replica:0/task:1/device:TPU:0");
EXPECT_EQ(tpu_devices[0][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[0][2].device,
"/job:worker/replica:0/task:2/device:TPU:0");
EXPECT_EQ(tpu_devices[0][2].host,
"/job:worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(tpu_devices[1][0].device,
"/job:worker/replica:0/task:2/device:TPU:1");
EXPECT_EQ(tpu_devices[1][0].host,
"/job:worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(tpu_devices[1][1].device,
"/job:worker/replica:0/task:0/device:TPU:0");
EXPECT_EQ(tpu_devices[1][1].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[1][2].device,
"/job:worker/replica:0/task:0/device:TPU:1");
EXPECT_EQ(tpu_devices[1][2].host,
"/job:worker/replica:0/task:0/device:CPU:0");
auto& xla_device_assignment = tpu_device_assignment.xla_device_assignment;
ASSERT_TRUE(xla_device_assignment.has_value());
EXPECT_EQ(xla_device_assignment->replica_count(), 2);
EXPECT_EQ(xla_device_assignment->computation_count(), 3);
ASSERT_EQ(xla_device_assignment->computation_devices_size(), 3);
const auto& computation_device_0 =
xla_device_assignment->computation_devices(0);
ASSERT_EQ(computation_device_0.replica_device_ids_size(), 2);
const auto& computation_device_1 =
xla_device_assignment->computation_devices(1);
ASSERT_EQ(computation_device_1.replica_device_ids_size(), 2);
const auto& computation_device_2 =
xla_device_assignment->computation_devices(2);
ASSERT_EQ(computation_device_2.replica_device_ids_size(), 2);
EXPECT_EQ(computation_device_0.replica_device_ids(0), 1);
EXPECT_EQ(computation_device_0.replica_device_ids(1), 5);
EXPECT_EQ(computation_device_1.replica_device_ids(0), 4);
EXPECT_EQ(computation_device_1.replica_device_ids(1), 0);
EXPECT_EQ(computation_device_2.replica_device_ids(0), 2);
EXPECT_EQ(computation_device_2.replica_device_ids(1), 3);
}
TEST(TPURewriteDeviceUtilTest, TestGetDeviceCoordinates) {
mlir::MLIRContext context;
mlir::Builder builder(&context);
auto device_assignment_attr = builder.getI64ArrayAttr({1, 2, 3});
  auto status_or_device_coordinates =
      GetDeviceCoordinates(device_assignment_attr);
  ASSERT_TRUE(status_or_device_coordinates.ok());
  auto device_coordinates = status_or_device_coordinates.value();
EXPECT_EQ(device_coordinates[0], 1);
EXPECT_EQ(device_coordinates[1], 2);
EXPECT_EQ(device_coordinates[2], 3);
}
TEST(TPURewriteDeviceUtilTest, TestInvalidAttrForDeviceAssignmentDisallowed) {
mlir::MLIRContext context;
mlir::Builder builder(&context);
auto device_assignment_attr = builder.getF32ArrayAttr({1.0, 2.0, 3.0});
  auto status_or_device_coordinates =
      GetDeviceCoordinates(device_assignment_attr);
  ASSERT_FALSE(status_or_device_coordinates.ok());
  EXPECT_EQ(status_or_device_coordinates.status().message(),
"bad 'device_assignment' attribute at index 0, not an int");
}
TEST(TPURewriteDeviceUtilTest, TestHasModelParallelismFalse) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
EXPECT_FALSE(HasModelParallelism(cluster));
}
TEST(TPURewriteDeviceUtilTest, TestHasModelParallelismTrue) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 5));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
EXPECT_TRUE(HasModelParallelism(cluster));
}
TEST(TPURewriteDeviceUtilTest,
TestHasModelParallelismFalseMissingCoresPerReplicaAttr) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
EXPECT_FALSE(HasModelParallelism(cluster));
}
TEST(TPURewriteDeviceUtilTest,
TestGetHostFailNumCoresPerReplicaMissingAttributes) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
mlir::TF::RuntimeDevices devices;
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostFailDeviceMissingAttributes) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
mlir::TF::RuntimeDevices devices;
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(devices, cluster, &host_device)));
}
TEST(TPURe |
1,234 | cpp | tensorflow/tensorflow | xla_rewrite_util | tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util.cc | tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_XLA_REWRITE_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_XLA_REWRITE_UTIL_H_
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/Attributes.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_structs.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
mlir::LogicalResult EraseClusterFuncs(
llvm::MutableArrayRef<mlir::tf_device::ClusterFuncOp> to_be_erased);
int MovePreservedParallelExecuteChildren(
int num_cores_per_replica,
llvm::SmallVector<mlir::Type, 8>& concatenated_output_types,
mlir::OpBuilder* builder, mlir::tf_device::ClusterFuncOp cluster_func,
mlir::tf_device::ParallelExecuteOp old_parallel_execute,
mlir::tf_device::ParallelExecuteOp* new_parallel_execute);
mlir::tf_device::LaunchOp WrapOpInLaunch(mlir::OpBuilder* builder,
mlir::Location loc,
mlir::Operation* op,
llvm::StringRef device);
}
#endif
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util.h"
namespace tensorflow {
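// Erases each cluster_func together with its parent
// tf_device.parallel_execute, cleaning up any now-dead
// TPUPartitionedInputV2/TPUPartitionedOutputV2 ops around it.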
mlir::LogicalResult EraseClusterFuncs(
llvm::MutableArrayRef<mlir::tf_device::ClusterFuncOp> to_be_erased) {
for (auto cluster : to_be_erased) {
auto old_parallel_execute =
cluster->getParentOfType<mlir::tf_device::ParallelExecuteOp>();
if (!old_parallel_execute) {
LOG(ERROR) << "Parent op of cluster " << cluster.getOperationName().str()
<< " is not ParallelExecuteOp.";
return mlir::failure();
}
for (auto result : old_parallel_execute.getExecuteOutputs()) {
for (mlir::Operation* user :
llvm::make_early_inc_range(result.getUsers())) {
if (llvm::isa<mlir::TF::TPUPartitionedOutputV2Op>(user)) {
assert(user->use_empty());
user->erase();
}
}
}
for (auto operand : cluster.getOperands()) {
mlir::Operation* def = operand.getDefiningOp();
if (operand.hasOneUse() &&
llvm::isa_and_nonnull<mlir::TF::TPUPartitionedInputV2Op>(def)) {
operand.dropAllUses();
def->erase();
}
}
if (!old_parallel_execute->use_empty()) {
LOG(ERROR) << "Use of parallel execute op "
<< old_parallel_execute.getOperationName().str()
<< " is not empty.";
return mlir::failure();
}
old_parallel_execute->erase();
}
return mlir::success();
}
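// Creates a replacement parallel_execute with 'num_cores_per_replica' regions
// in place of the region holding 'cluster_func', moving all other child
// regions over; returns the index the cluster region occupied in the old op.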
int MovePreservedParallelExecuteChildren(
int num_cores_per_replica,
llvm::SmallVector<mlir::Type, 8>& concatenated_output_types,
mlir::OpBuilder* builder, mlir::tf_device::ClusterFuncOp cluster_func,
mlir::tf_device::ParallelExecuteOp old_parallel_execute,
mlir::tf_device::ParallelExecuteOp* new_parallel_execute) {
const size_t num_moved_children =
old_parallel_execute.getRegions().size() - 1;
*new_parallel_execute = builder->create<mlir::tf_device::ParallelExecuteOp>(
old_parallel_execute->getLoc(),
num_moved_children + num_cores_per_replica, concatenated_output_types);
int cluster_idx = -1;
for (size_t child_idx = 0;
child_idx < old_parallel_execute.getRegions().size(); ++child_idx) {
auto& block = old_parallel_execute.GetRegionBlockWithIndex(child_idx);
if (cluster_func->getBlock() == &block) {
assert(cluster_idx == -1);
cluster_idx = child_idx;
}
}
assert(cluster_idx != -1);
for (int child_idx = 0; child_idx < num_moved_children; ++child_idx) {
int old_idx = child_idx >= cluster_idx ? child_idx + 1 : child_idx;
int new_idx = child_idx >= cluster_idx ? child_idx + num_cores_per_replica
: child_idx;
new_parallel_execute->getRegions()[new_idx].takeBody(
old_parallel_execute.getRegions()[old_idx]);
}
return cluster_idx;
}
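// Moves 'op' into a freshly created tf_device.launch pinned to 'device',
// forwarding its results through a tf_device.return terminator.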
mlir::tf_device::LaunchOp WrapOpInLaunch(mlir::OpBuilder* builder,
mlir::Location loc,
mlir::Operation* op,
llvm::StringRef device) {
mlir::OpBuilder::InsertPoint insert_point = builder->saveInsertionPoint();
auto launch = builder->create<mlir::tf_device::LaunchOp>(
loc, builder->getStringAttr(device), op->getResultTypes());
launch.getBody().push_back(new mlir::Block);
builder->setInsertionPointToEnd(&launch.GetBody());
builder->create<mlir::tf_device::ReturnOp>(loc, op->getResults());
op->moveBefore(launch.GetBody().getTerminator());
builder->restoreInsertionPoint(insert_point);
return launch;
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util.h"
#include <string>
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> GetMlirModuleFromString(
llvm::StringRef string, mlir::MLIRContext* context) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
auto status =
tensorflow::DeserializeMlirModule(string, context, &mlir_module);
if (!status.ok()) {
return status;
}
return mlir_module;
}
TEST(XlaRewriteUtilTest, TestEraseClusterFuncs) {
static const char* const module_str =
R"(
module attributes {tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:GPU:0"]} {
func.func @convert_cluster_func(%arg0: tensor<i32>) -> () {
%2 = "tf_device.parallel_execute"() ({
%3 = "tf_device.cluster_func"(%arg0) {device = "/job:localhost/replica:0/task:0/device:GPU:0", func = @func} : (tensor<i32>) -> tensor<i32>
tf_device.return %3 : tensor<i32>
}) : () -> tensor<i32>
return
}
func.func @func(%arg0: tensor<i32>) -> tensor<i32> {
return %arg0 : tensor<i32>
}
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
llvm::SmallVector<mlir::tf_device::ClusterFuncOp, 4> cluster_func_ops;
module->walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
cluster_func_ops.push_back(cluster_func);
});
EXPECT_EQ(cluster_func_ops.size(), 1);
EXPECT_TRUE(mlir::succeeded(tensorflow::EraseClusterFuncs(cluster_func_ops)));
llvm::SmallVector<mlir::tf_device::ClusterFuncOp, 4> new_cluster_func_ops;
module->walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
new_cluster_func_ops.push_back(cluster_func);
});
EXPECT_EQ(new_cluster_func_ops.size(), 0);
}
TEST(XlaRewriteUtilTest, TestWrapOpInLaunch) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0"}} {
func.func @main() -> () {
"tf_device.cluster"() ({
tf_device.return
}) {} : () -> ()
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
std::string device = "/job:localhost/replica:0/task:0/device:CPU:0";
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
mlir::OpBuilder builder(&context);
auto loc = cluster->getLoc();
auto launch_op = tensorflow::WrapOpInLaunch(&builder, loc, cluster, device);
EXPECT_TRUE(llvm::isa<mlir::tf_device::LaunchOp>(launch_op));
launch_op->erase();
}
}
} |
1,235 | cpp | tensorflow/tensorflow | call_graph_util | tensorflow/compiler/mlir/tensorflow/utils/call_graph_util.cc | tensorflow/compiler/mlir/tensorflow/utils/call_graph_util_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_CALL_GRAPH_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_CALL_GRAPH_UTIL_H_
#include <functional>
#include <stack>
#include <vector>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Support/LogicalResult.h"
namespace mlir {
std::vector<llvm::StringRef> GetEntryFunctionAttributeNames();
bool IsEntryFunction(func::FuncOp func);
llvm::SmallVector<func::FuncOp> GetEntryFunctions(ModuleOp module);
LogicalResult GetCallees(SymbolUserOpInterface op, SymbolTable &symtab,
llvm::SmallVector<func::FuncOp> &callees);
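// Traverses the call graph from 'root' and collects the first ops of the
// given types (satisfying 'predicate') on each call path; traversal does not
// descend past a matching op.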
template <typename T, typename... Types>
LogicalResult GetFirstOpsOfType(
func::FuncOp root, SymbolTable &symtab,
const std::function<bool(SymbolUserOpInterface)> &predicate,
llvm::SmallVector<SymbolUserOpInterface> &ops) {
std::stack<func::FuncOp> worklist;
worklist.push(root);
while (!worklist.empty()) {
func::FuncOp u = worklist.top();
worklist.pop();
auto result = u.walk([&](SymbolUserOpInterface op) {
if (llvm::isa<T, Types...>(op) && (!predicate || predicate(op))) {
ops.push_back(op);
return WalkResult::advance();
}
llvm::SmallVector<func::FuncOp> callees;
if (GetCallees(op, symtab, callees).failed()) {
return WalkResult::interrupt();
}
for (auto callee : callees) {
worklist.push(callee);
}
return WalkResult::advance();
});
if (result.wasInterrupted()) return failure();
}
return success();
}
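// Like GetFirstOpsOfType, but keeps descending through matching ops that
// satisfy 'predicate' (recorded in 'hits'); a matching op that fails the
// predicate is recorded in 'first_misses' and ends traversal on that path.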
template <typename T, typename... Types>
LogicalResult GetOpsOfTypeUntilMiss(
func::FuncOp root, SymbolTable &symtab,
const std::function<bool(SymbolUserOpInterface)> &predicate,
llvm::SmallVector<SymbolUserOpInterface> &hits,
llvm::SmallVector<SymbolUserOpInterface> &first_misses) {
std::stack<func::FuncOp> worklist;
worklist.push(root);
while (!worklist.empty()) {
func::FuncOp u = worklist.top();
worklist.pop();
auto result = u.walk([&](SymbolUserOpInterface op) {
if (llvm::isa<T, Types...>(op)) {
if (!predicate || predicate(op)) {
hits.push_back(op);
} else {
first_misses.push_back(op);
return WalkResult::advance();
}
}
llvm::SmallVector<func::FuncOp> callees;
if (GetCallees(op, symtab, callees).failed()) {
return WalkResult::interrupt();
}
for (auto callee : callees) {
worklist.push(callee);
}
return WalkResult::advance();
});
if (result.wasInterrupted()) return failure();
}
return success();
}
bool HasSingleBlock(func::FuncOp func);
}
#endif
#include <vector>
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
namespace mlir {
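// Attribute names that mark a function as a module entry point.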
std::vector<llvm::StringRef> GetEntryFunctionAttributeNames() {
return {"tf.entry_function",
tf_saved_model::kTfSavedModelInitializerTypeAttr};
}
bool IsEntryFunction(func::FuncOp func) {
for (const auto &attr : GetEntryFunctionAttributeNames()) {
if (func->hasAttr(attr)) {
return true;
}
}
return false;
}
llvm::SmallVector<func::FuncOp> GetEntryFunctions(ModuleOp module) {
llvm::SmallVector<func::FuncOp> entry_funcs;
module.walk([&](func::FuncOp func) {
if (IsEntryFunction(func)) {
entry_funcs.push_back(func);
}
});
return entry_funcs;
}
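// Collects the functions referenced by any symbol attribute on 'op'; fails
// if a referenced symbol does not resolve to a func.FuncOp.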
LogicalResult GetCallees(SymbolUserOpInterface op, SymbolTable &symtab,
llvm::SmallVector<func::FuncOp> &callees) {
for (auto attr : op->getAttrs()) {
auto sym = mlir::dyn_cast<SymbolRefAttr>(attr.getValue());
if (!sym) continue;
auto callee = symtab.lookup<func::FuncOp>(sym.getRootReference());
if (!callee) {
return op->emitError()
<< "Cannot find function " << sym.getRootReference();
}
callees.push_back(callee);
}
return success();
}
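// True iff the function consists of a single region with a single block.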
bool HasSingleBlock(func::FuncOp func) {
return func->getNumRegions() == 1 && func.getBody().hasOneBlock();
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/call_graph_util.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(CallGraphUtilTest, GetEntryFunctionAttributeNames) {
auto attr_names = mlir::GetEntryFunctionAttributeNames();
EXPECT_EQ(attr_names.size(), 2);
EXPECT_EQ(attr_names[0], "tf.entry_function");
EXPECT_EQ(attr_names[1],
mlir::tf_saved_model::kTfSavedModelInitializerTypeAttr);
}
TEST(CallGraphUtilTest, GetEntryFunctions) {
const char *const code = R"mlir(
func.func @entry_func_1(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
%0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @entry_func_2(%arg0: tensor<i32>) -> tensor<i32> attributes {tf_saved_model.initializer_type = ""} {
%0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @func(%arg0: tensor<i32>) -> tensor<i32> {
func.return %arg0 : tensor<i32>
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
auto entry_funcs = GetEntryFunctions(*module);
EXPECT_EQ(entry_funcs.size(), 2);
EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func_1");
EXPECT_EQ(entry_funcs[1].getSymName(), "entry_func_2");
}
TEST(CallGraphUtilTest, GetCallees) {
const char *const code = R"mlir(
func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf_saved_model.initializer_type = ""} {
%0 = "tf.While"(%arg0) {cond = @while_cond_func, body = @while_body_func, is_stateless = true} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @while_cond_func(%arg0: tensor<i32>) -> tensor<i1> {
%0 = "tf.Const"() {value = dense<0> : tensor<i1>} : () -> tensor<i1>
func.return %0 : tensor<i1>
}
func.func @while_body_func(%arg0: tensor<i32>) -> (tensor<i32>) {
%0 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
func.return %0 : tensor<i32>
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
mlir::SymbolTable symtab(*module);
llvm::SmallVector<mlir::func::FuncOp> callees;
module->walk([&](mlir::SymbolUserOpInterface op) {
auto result = GetCallees(op, symtab, callees).succeeded();
ASSERT_TRUE(result);
EXPECT_EQ(callees.size(), 2);
EXPECT_EQ(callees[0].getSymName(), "while_body_func");
EXPECT_EQ(callees[1].getSymName(), "while_cond_func");
});
}
TEST(CallGraphUtilTest, GetFirstOpsOfType) {
const char *const code = R"mlir(
func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
%0 = "tf.While"(%arg0) {cond = @while_cond_func, body = @while_body_func, is_stateless = true} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @while_cond_func(%arg0: tensor<i32>) -> tensor<i1> {
%0 = "tf.Const"() {value = dense<0> : tensor<i1>} : () -> tensor<i1>
func.return %0 : tensor<i1>
}
func.func @while_body_func(%arg0: tensor<i32>) -> (tensor<i32>) {
%0 = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @outer_stateful_pcall_func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @outer_stateful_pcall_func(%arg0: tensor<i32>) -> (tensor<i32>) {
%0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @inner_stateful_pcall_func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @inner_stateful_pcall_func(%arg0: tensor<i32>) -> tensor<i32> {
%0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @func(%arg0: tensor<i32>) -> tensor<i32> {
func.return %arg0 : tensor<i32>
}
)mlir";
auto has_compile_device_type = [](mlir::SymbolUserOpInterface op) {
return op->hasAttr(tensorflow::kCompileDeviceTypeAttr);
};
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
mlir::SymbolTable symtab(*module);
llvm::SmallVector<mlir::func::FuncOp> entry_funcs =
GetEntryFunctions(*module);
EXPECT_EQ(entry_funcs.size(), 1);
EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func");
llvm::SmallVector<mlir::SymbolUserOpInterface> outermost_pcall_ops;
auto result =
mlir::GetFirstOpsOfType<mlir::TF::StatefulPartitionedCallOp,
mlir::TF::PartitionedCallOp>(
entry_funcs[0], symtab, has_compile_device_type, outermost_pcall_ops)
.succeeded();
ASSERT_TRUE(result);
EXPECT_EQ(outermost_pcall_ops.size(), 1);
auto func =
llvm::dyn_cast<mlir::func::FuncOp>(outermost_pcall_ops[0]->getParentOp());
ASSERT_TRUE(func);
EXPECT_EQ(func.getSymName(), "outer_stateful_pcall_func");
}
TEST(CallGraphUtilTest, GetOpsOfTypeUntilMiss) {
const char *const code = R"mlir(
func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
%0 = "tf.While"(%arg0) {cond = @while_cond_func, body = @while_body_func, is_stateless = true} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @while_cond_func(%arg0: tensor<i32>) -> tensor<i1> {
%0 = "tf.Const"() {value = dense<0> : tensor<i1>} : () -> tensor<i1>
func.return %0 : tensor<i1>
}
func.func @while_body_func(%arg0: tensor<i32>) -> (tensor<i32>) {
%0 = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @outer_stateful_pcall_func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @outer_stateful_pcall_func(%arg0: tensor<i32>) -> (tensor<i32>) {
%0 = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @inner_stateful_pcall_func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @inner_stateful_pcall_func(%arg0: tensor<i32>) -> tensor<i32> {
%0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @func(%arg0: tensor<i32>) -> tensor<i32> {
func.return %arg0 : tensor<i32>
}
)mlir";
auto has_no_compile_device_type = [](mlir::SymbolUserOpInterface op) {
return !op->hasAttr(tensorflow::kCompileDeviceTypeAttr);
};
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
mlir::SymbolTable symtab(*module);
llvm::SmallVector<mlir::func::FuncOp> entry_funcs =
GetEntryFunctions(*module);
EXPECT_EQ(entry_funcs.size(), 1);
EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func");
llvm::SmallVector<mlir::SymbolUserOpInterface> noinline_pcall_ops,
outermost_pcall_ops;
auto result =
mlir::GetOpsOfTypeUntilMiss<mlir::TF::StatefulPartitionedCallOp,
mlir::TF::PartitionedCallOp>(
entry_funcs[0], symtab, has_no_compile_device_type,
noinline_pcall_ops, outermost_pcall_ops)
.succeeded();
ASSERT_TRUE(result);
EXPECT_EQ(noinline_pcall_ops.size(), 2);
auto func =
llvm::dyn_cast<mlir::func::FuncOp>(noinline_pcall_ops[0]->getParentOp());
ASSERT_TRUE(func);
EXPECT_EQ(func.getSymName(), "while_body_func");
func =
llvm::dyn_cast<mlir::func::FuncOp>(noinline_pcall_ops[1]->getParentOp());
ASSERT_TRUE(func);
EXPECT_EQ(func.getSymName(), "outer_stateful_pcall_func");
EXPECT_EQ(outermost_pcall_ops.size(), 1);
func =
llvm::dyn_cast<mlir::func::FuncOp>(outermost_pcall_ops[0]->getParentOp());
ASSERT_TRUE(func);
EXPECT_EQ(func.getSymName(), "inner_stateful_pcall_func");
}
TEST(CallGraphUtilTest, SingleBlockEntryFunction) {
const char *const code = R"mlir(
func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
func.return %arg0 : tensor<i32>
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
llvm::errs() << "module:\n";
ASSERT_TRUE(module);
mlir::SymbolTable symtab(*module);
llvm::SmallVector<mlir::func::FuncOp> entry_funcs =
GetEntryFunctions(*module);
EXPECT_EQ(entry_funcs.size(), 1);
EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func");
EXPECT_TRUE(HasSingleBlock(entry_funcs[0]));
}
TEST(CallGraphUtilTest, MultipleBlocksEntryFunction) {
const char *const code = R"mlir(
func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
cf.br ^bb1
^bb1:
func.return %arg0 : tensor<i32>
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::cf::ControlFlowDialect, mlir::func::FuncDialect,
mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
llvm::errs() << "module:\n";
ASSERT_TRUE(module);
mlir::SymbolTable symtab(*module);
llvm::SmallVector<mlir::func::FuncOp> entry_funcs =
GetEntryFunctions(*module);
EXPECT_EQ(entry_funcs.size(), 1);
EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func");
EXPECT_FALSE(HasSingleBlock(entry_funcs[0]));
}
}
} |
1,236 | cpp | tensorflow/tensorflow | data_dumper_logger_config | tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.cc | tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_DATA_DUMPER_LOGGER_CONFIG_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_DATA_DUMPER_LOGGER_CONFIG_H_
#include <functional>
#include <string>
#include "tensorflow/compiler/mlir/tensorflow/utils/bridge_logger.h"
namespace tensorflow {
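// Pass instrumentation config that dumps the IR before/after each pass to a
// file whose name is produced by the 'get_filename' callback, subject to the
// pass filtering rules inherited from BridgeLoggerConfig.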
class DataDumperLoggerConfig : public ::tensorflow::BridgeLoggerConfig {
public:
explicit DataDumperLoggerConfig(
std::function<std::string(const std::string &, mlir::Operation *op)>
get_filename,
const std::string &pass_prefix = "", bool print_module_scope = false,
bool print_after_only_on_change = true,
mlir::OpPrintingFlags op_printing_flags = mlir::OpPrintingFlags());
void printBeforeIfEnabled(mlir::Pass *pass, mlir::Operation *op,
PrintCallbackFn print_callback) override;
void printAfterIfEnabled(mlir::Pass *pass, mlir::Operation *op,
PrintCallbackFn print_callback) override;
private:
static void DumpMlir(const std::string &filename,
BridgeLoggerConfig::PrintCallbackFn print_callback);
std::function<std::string(const std::string &, mlir::Operation *op)>
get_filename_;
std::string pass_prefix_;
};
}
#endif
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include <functional>
#include <memory>
#include <string>
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
namespace tensorflow {
DataDumperLoggerConfig::DataDumperLoggerConfig(
std::function<std::string(const std::string &, mlir::Operation *op)>
get_filename,
const std::string &pass_prefix, bool print_module_scope,
bool print_after_only_on_change, mlir::OpPrintingFlags op_printing_flags)
: ::tensorflow::BridgeLoggerConfig(
print_module_scope, print_after_only_on_change, op_printing_flags),
get_filename_(get_filename),
pass_prefix_(pass_prefix) {}
void DataDumperLoggerConfig::printBeforeIfEnabled(
mlir::Pass *pass, mlir::Operation *op, PrintCallbackFn print_callback) {
std::string pass_name = pass->getName().str();
std::string filename =
get_filename_(pass_prefix_ + "before_" + pass_name, op);
if (ShouldPrint(pass, op)) DumpMlir(filename, print_callback);
}
void DataDumperLoggerConfig::printAfterIfEnabled(
mlir::Pass *pass, mlir::Operation *op, PrintCallbackFn print_callback) {
std::string pass_name = pass->getName().str();
std::string filename = get_filename_(pass_prefix_ + "after_" + pass_name, op);
if (ShouldPrint(pass, op)) DumpMlir(filename, print_callback);
}
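// Writes the IR emitted by 'print_callback' to 'filename' via
// CreateFileForDumping, logging the resulting path.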
void DataDumperLoggerConfig::DumpMlir(
const std::string &filename,
BridgeLoggerConfig::PrintCallbackFn print_callback) {
std::unique_ptr<llvm::raw_ostream> os;
std::string filepath;
if (tensorflow::CreateFileForDumping(filename, &os, &filepath).ok()) {
print_callback(*os);
LOG(INFO) << "Dumped MLIR module to " << filepath;
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static const char *const module_with_add =
R"(module {
func.func @main(%arg0: tensor<3x4x5xf32>, %arg1: tensor<3x4x5xf32>) -> tensor<3x4x5xf32> {
%0 = "tf.AddV2"(%arg0, %arg1) : (tensor<3x4x5xf32>, tensor<3x4x5xf32>) -> tensor<3x4x5xf32>
func.return %0 : tensor<3x4x5xf32>
}
}
)";
TEST(DataDumperLoggerConfig, TestPassFilter) {
mlir::DialectRegistry mlir_registry;
mlir::RegisterAllTensorFlowDialects(mlir_registry);
mlir::MLIRContext mlir_context(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module_with_add;
TF_ASSERT_OK(DeserializeMlirModule(module_with_add, &mlir_context,
&mlir_module_with_add));
std::unique_ptr<mlir::Pass> partitioning_pass =
mlir::TFTPU::CreateTPUResourceReadsWritesPartitioningPass();
std::unique_ptr<mlir::Pass> shape_inference_pass =
mlir::TF::CreateTFShapeInferencePass();
std::unique_ptr<mlir::Pass> inliner_pass = mlir::createInlinerPass();
setenv("MLIR_BRIDGE_LOG_PASS_FILTER",
"TPUResourceReadsWritesPartitioningPass;TensorFlowShapeInferencePass",
1);
setenv("TF_DUMP_GRAPH_PREFIX", "sponge", 1);
const string kTestFilename = "test.txt";
int print_callback_count = 0;
auto get_filename_fn = [](const string &filename, mlir::Operation *op) {
return filename;
};
auto print_callback = [&](llvm::raw_ostream &out) {
print_callback_count++;
return;
};
DataDumperLoggerConfig data_dumper_logger_config(get_filename_fn);
data_dumper_logger_config.printBeforeIfEnabled(
partitioning_pass.get(), mlir_module_with_add.get(), print_callback);
EXPECT_EQ(print_callback_count, 1);
data_dumper_logger_config.printBeforeIfEnabled(
shape_inference_pass.get(), mlir_module_with_add.get(), print_callback);
EXPECT_EQ(print_callback_count, 2);
data_dumper_logger_config.printBeforeIfEnabled(
inliner_pass.get(), mlir_module_with_add.get(), print_callback);
EXPECT_EQ(print_callback_count, 2);
data_dumper_logger_config.printAfterIfEnabled(
partitioning_pass.get(), mlir_module_with_add.get(), print_callback);
EXPECT_EQ(print_callback_count, 3);
data_dumper_logger_config.printAfterIfEnabled(
shape_inference_pass.get(), mlir_module_with_add.get(), print_callback);
EXPECT_EQ(print_callback_count, 4);
data_dumper_logger_config.printAfterIfEnabled(
inliner_pass.get(), mlir_module_with_add.get(), print_callback);
EXPECT_EQ(print_callback_count, 4);
}
}
} |
1,237 | cpp | tensorflow/tensorflow | shape_inference_utils | tensorflow/core/ir/utils/shape_inference_utils.cc | tensorflow/core/ir/utils/shape_inference_utils_test.cc | #ifndef TENSORFLOW_CORE_IR_UTILS_SHAPE_INFERENCE_UTILS_H_
#define TENSORFLOW_CORE_IR_UTILS_SHAPE_INFERENCE_UTILS_H_
#include <cstdint>
#include <optional>
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
struct OpRegistrationData;
}
namespace mlir {
namespace tfg {
using OperandAsConstantFn = llvm::function_ref<Attribute(Value)>;
using OpResultAsShapeFn =
llvm::function_ref<tensorflow::shape_inference::ShapeHandle(
tensorflow::shape_inference::InferenceContext&, OpResult)>;
using ResultElementTypeFn = llvm::function_ref<Type(int)>;
using GetAttrValuesFn = llvm::function_ref<tensorflow::Status(
Operation*, llvm::StringRef, const tensorflow::OpRegistrationData*, bool,
tensorflow::AttrValueMap*)>;
LogicalResult InferReturnTypeComponentsForTFOp(
std::optional<Location> location, Operation* op, ValueRange operands,
int64_t graph_version, OperandAsConstantFn operand_as_constant_fn,
OpResultAsShapeFn op_result_as_shape_fn,
ResultElementTypeFn result_element_type_fn,
GetAttrValuesFn get_attr_values_fn,
SmallVectorImpl<ShapedTypeComponents>& inferred_return_shapes);
LogicalResult InferReturnTypeComponentsForTFOp(
std::optional<Location> location, Operation* op, ValueRange operands,
int64_t graph_version, OperandAsConstantFn operand_as_constant_fn,
OpResultAsShapeFn op_result_as_shape_fn,
ResultElementTypeFn result_element_type_fn,
SmallVectorImpl<ShapedTypeComponents>& inferred_return_shapes);
}
}
#endif
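// Editor's note (hedged call-site sketch mirroring the unit test below; the
// lambdas are placeholder callbacks, `op` is a TFG operation, and TFOp comes
// from tensorflow/core/ir/tf_op_wrapper.h):
//
//   SmallVector<ShapedTypeComponents> results;
//   auto operand_as_constant = [](Value v) -> Attribute {
//     Operation *def = v.getDefiningOp();
//     return def ? def->getAttr("value") : nullptr;
//   };
//   auto result_as_shape = [](tensorflow::shape_inference::InferenceContext &,
//                             OpResult) {
//     return tensorflow::shape_inference::ShapeHandle();
//   };
//   auto result_elt_type = [&](int i) -> Type {
//     return mlir::cast<ShapedType>(op->getResult(i).getType())
//         .getElementType();
//   };
//   (void)InferReturnTypeComponentsForTFOp(
//       op->getLoc(), op, TFOp(op).getNonControlOperands(),
//       /*graph_version=*/1010, operand_as_constant, result_as_shape,
//       result_elt_type, results);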
#include "tensorflow/core/ir/utils/shape_inference_utils.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/DerivedAttributeOpInterface.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/importexport/convert_tensor.h"
#include "tensorflow/core/ir/importexport/convert_types.h"
#include "tensorflow/core/ir/importexport/graphdef_export.h"
#include "tensorflow/core/ir/types/dialect.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#define DEBUG_TYPE "tfg-shape-inference-utils"
using tensorflow::shape_inference::InferenceContext;
using tensorflow::shape_inference::ShapeHandle;
namespace mlir {
namespace tfg {
namespace {
llvm::StringRef GetTensorFlowOpName(Operation* inst) {
llvm::StringRef op_name = inst->getName().stripDialect();
if (!op_name.consume_back(".sink")) op_name.consume_back(".Sink");
return op_name;
}
NamedAttrList GetAllAttributesFromOperation(Operation* op) {
NamedAttrList attr_list;
attr_list.append(op->getAttrDictionary().getValue());
if (auto derived = dyn_cast<DerivedAttributeOpInterface>(op)) {
auto materialized = derived.materializeDerivedAttributes();
attr_list.append(materialized.getValue());
}
return attr_list;
}
std::optional<tensorflow::PartialTensorShape> GetShapeFromMlirType(Type t) {
if (auto ranked_type = t.dyn_cast<RankedTensorType>()) {
tensorflow::PartialTensorShape shape;
const tensorflow::Status status =
tensorflow::PartialTensorShape::BuildPartialTensorShape(
ConvertMlirShapeToTF(ranked_type.getShape()), &shape);
if (status.ok()) return shape;
}
return std::nullopt;
}
std::optional<tensorflow::PartialTensorShape> GetShapeFromMlirAttr(Value v) {
if (auto arg = v.dyn_cast<BlockArgument>()) {
Operation* parent_op = arg.getOwner()->getParentOp();
if (auto func_op = llvm::dyn_cast<FunctionOpInterface>(parent_op)) {
int arg_idx = arg.getArgNumber();
auto attrs =
func_op.getArgAttrOfType<ArrayAttr>(arg_idx, "tf._output_shapes");
if (!attrs || attrs.size() != 1) return std::nullopt;
auto shape_attr = attrs[0].dyn_cast<tf_type::ShapeAttr>();
if (shape_attr && shape_attr.hasRank())
return tensorflow::PartialTensorShape(shape_attr.getShape());
}
}
return std::nullopt;
}
template <typename T>
std::unique_ptr<std::vector<
std::pair<tensorflow::PartialTensorShape, tensorflow::DataType>>>
GetSubtypesHelper(Type type) {
auto type_with_subtypes =
type.cast<TensorType>().getElementType().dyn_cast<T>();
if (!type_with_subtypes || type_with_subtypes.getSubtypes().empty()) {
return nullptr;
}
auto shapes_and_types = std::make_unique<std::vector<
std::pair<tensorflow::PartialTensorShape, tensorflow::DataType>>>();
for (auto subtype : type_with_subtypes.getSubtypes()) {
auto shape = GetShapeFromMlirType(subtype);
if (!shape) {
shapes_and_types = nullptr;
break;
}
tensorflow::DataType dtype;
auto status = ConvertToDataType(subtype.getElementType(), &dtype);
assert(status.ok() && "Unknown element type");
shapes_and_types->emplace_back(*shape, dtype);
}
return shapes_and_types;
}
std::unique_ptr<std::vector<
std::pair<tensorflow::PartialTensorShape, tensorflow::DataType>>>
GetSubtypes(Type type) {
auto subclasses = GetSubtypesHelper<tf_type::ResourceType>(type);
if (subclasses) return subclasses;
return GetSubtypesHelper<tf_type::VariantType>(type);
}
LogicalResult ReportErrorFromShapeFunction(std::optional<Location> location,
llvm::StringRef op_name,
llvm::StringRef error_message) {
VLOG(3) << "TensorFlow shape inference function errored for op '"
<< op_name.data() << "': " << error_message.data();
return failure();
}
std::optional<SmallVector<int64_t, 8>> GetShapeFromHandle(
InferenceContext& context, const ShapeHandle& sh) {
if (!context.RankKnown(sh)) return std::nullopt;
SmallVector<int64_t, 8> shape;
for (int dim : llvm::seq<int>(0, context.Rank(sh)))
shape.push_back(context.Value(context.Dim(sh, dim)));
return shape;
}
TensorType CreateTensorType(InferenceContext& context, const ShapeHandle& sh,
Type element_type) {
auto shape = GetShapeFromHandle(context, sh);
if (shape.has_value())
return GetTypeFromTFTensorShape(shape.value(), element_type, {});
return UnrankedTensorType::get(element_type);
}
ShapedTypeComponents CreateShapedTypeComponents(InferenceContext& context,
const ShapeHandle& sh,
Type element_type) {
auto shape = GetShapeFromHandle(context, sh);
if (shape.has_value())
return ShapedTypeComponents(ConvertTFShapeToMlir(shape.value()),
element_type);
return ShapedTypeComponents(element_type);
}
}
LogicalResult InferReturnTypeComponentsForTFOp(
std::optional<Location> location, Operation* op, ValueRange operands,
int64_t graph_version, OperandAsConstantFn operand_as_constant_fn,
OpResultAsShapeFn op_result_as_shape_fn,
ResultElementTypeFn result_element_type_fn,
GetAttrValuesFn get_attr_values_fn,
SmallVectorImpl<ShapedTypeComponents>& inferred_return_shapes) {
llvm::StringRef op_name = GetTensorFlowOpName(op);
const tensorflow::OpRegistrationData* op_reg_data =
tensorflow::OpRegistry::Global()->LookUp(op_name.str());
if (!op_reg_data) {
VLOG(3) << "Skipping inference for unregistered op '" << op_name.data()
<< "'.\n";
return failure();
}
if (!op_reg_data->shape_inference_fn) {
VLOG(3) << "Skipping inference for op without shape function '"
<< op_name.data() << "'.\n";
return failure();
}
tensorflow::AttrValueMap attributes;
if (get_attr_values_fn) {
tensorflow::Status status =
get_attr_values_fn(op, op_name, op_reg_data,
true, &attributes);
if (!status.ok()) {
VLOG(3) << op_name.data()
<< " failed to get AttrValue: " << status.message();
return failure();
}
} else {
auto* dialect = cast<TFGraphDialect>(op->getDialect());
tensorflow::NodeDef node_def;
tensorflow::Status status = ConvertToNodeDef(
op, &node_def, dialect,
[&](Value value) { return GetValueName(value, dialect); });
if (!status.ok()) {
VLOG(3) << op_name.data()
<< " failed to be converted to NodeDef: " << status.message();
return failure();
}
attributes = node_def.attr();
}
const int num_operands = operands.size();
std::vector<tensorflow::PartialTensorShape> input_shapes(num_operands);
std::vector<std::unique_ptr<std::vector<
std::pair<tensorflow::PartialTensorShape, tensorflow::DataType>>>>
handle_shapes_and_types(num_operands);
for (const auto& it : llvm::enumerate(operands)) {
Value operand = it.value();
size_t index = it.index();
Type operand_type = operand.getType();
if (auto shape = GetShapeFromMlirType(operand_type)) {
input_shapes[index] = *shape;
} else if (auto shape = GetShapeFromMlirAttr(operand)) {
input_shapes[index] = *shape;
}
handle_shapes_and_types[index] = GetSubtypes(operand_type);
}
InferenceContext c(graph_version, tensorflow::AttrSlice(&attributes),
op_reg_data->op_def, input_shapes, {},
{}, handle_shapes_and_types);
if (!c.construction_status().ok()) {
VLOG(3) << "InferenceContext construction failed on " << op_name.data()
<< ": " << c.construction_status().message();
return failure();
}
auto status = c.Run(op_reg_data->shape_inference_fn);
if (!status.ok()) {
return ReportErrorFromShapeFunction(location, op_name,
std::string(status.message()));
}
std::vector<const tensorflow::Tensor*> input_tensors(num_operands);
std::vector<tensorflow::Tensor> tensors(num_operands);
std::vector<ShapeHandle> input_tensors_as_shapes(num_operands);
auto requires_inputs = [&]() {
return any_of(llvm::seq<int>(0, c.num_inputs()), [&](int input) {
return !input_tensors[input] &&
(c.requested_input_tensor(input) ||
c.requested_input_tensor_as_partial_shape(input));
});
};
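  // Iterate to a fixed point: re-run the shape function whenever a new
  // constant operand or operand shape becomes available, since some shape
  // functions (e.g. Reshape's) only refine their outputs once the requested
  // inputs are materialized.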
while (requires_inputs()) {
VLOG(4) << "\tfeeding new inputs or input as partial shapes\n";
bool has_new_inputs = false;
for (int input : llvm::seq<int>(0, c.num_inputs())) {
if (input_tensors[input]) continue;
if (c.requested_input_tensor(input)) {
if (auto attr = operand_as_constant_fn(op->getOperand(input))
.dyn_cast_or_null<ElementsAttr>()) {
VLOG(4) << "Requesting " << input << " as constant\n";
tensorflow::Tensor* input_tensor = &tensors.at(input);
auto status = ConvertToTensor(attr, input_tensor);
if (status.ok()) {
input_tensors.at(input) = input_tensor;
has_new_inputs = true;
} else {
VLOG(4) << "Error converting input " << input << " of op '"
<< op_name.data() << "' to Tensor: " << status.message()
<< "\n";
}
}
}
if (c.requested_input_tensor_as_partial_shape(input) &&
!input_tensors[input] && !input_tensors_as_shapes[input].Handle()) {
VLOG(4) << "Requesting " << input << " as shape\n";
auto op_result = op->getOperand(input).dyn_cast<OpResult>();
if (!op_result) continue;
auto handle = op_result_as_shape_fn(c, op_result);
VLOG(4) << "Requested " << input << " as shape "
<< (handle.Handle() ? "found" : "not found");
if (handle.Handle()) {
input_tensors_as_shapes[input] = handle;
has_new_inputs = true;
}
}
}
if (!has_new_inputs) break;
c.set_input_tensors(input_tensors);
c.set_input_tensors_as_shapes(input_tensors_as_shapes);
auto status = c.Run(op_reg_data->shape_inference_fn);
if (!status.ok()) {
return ReportErrorFromShapeFunction(location, op_name,
std::string(status.message()));
}
}
for (int output : llvm::seq<int>(0, c.num_outputs())) {
ShapeHandle shape_handle = c.output(output);
VLOG(4) << "Inferred output " << output << " : "
<< c.DebugString(shape_handle) << "\n";
Type new_element_type = result_element_type_fn(output);
if (new_element_type &&
new_element_type.isa<tf_type::ResourceType, tf_type::VariantType>()) {
auto handle_shapes_types = c.output_handle_shapes_and_types(output);
if (handle_shapes_types) {
SmallVector<TensorType, 1> subtypes;
Builder b(op->getContext());
for (const auto& shape_n_type : *handle_shapes_types) {
Type element_type;
auto status = ConvertDataType(shape_n_type.dtype, b, &element_type);
assert(status.ok() && "Unknown element type");
subtypes.push_back(
CreateTensorType(c, shape_n_type.shape, element_type));
}
if (new_element_type.isa<tf_type::ResourceType>()) {
new_element_type =
tf_type::ResourceType::get(subtypes, op->getContext());
} else {
new_element_type =
tf_type::VariantType::get(subtypes, op->getContext());
}
}
}
inferred_return_shapes.push_back(
CreateShapedTypeComponents(c, shape_handle, new_element_type));
}
return success();
}
LogicalResult InferReturnTypeComponentsForTFOp(
std::optional<Location> location, Operation* op, ValueRange operands,
int64_t graph_version, OperandAsConstantFn operand_as_constant_fn,
OpResultAsShapeFn op_result_as_shape_fn,
ResultElementTypeFn result_element_type_fn,
SmallVectorImpl<ShapedTypeComponents>& inferred_return_shapes) {
return InferReturnTypeComponentsForTFOp(
location, op, operands, graph_version, operand_as_constant_fn,
op_result_as_shape_fn, result_element_type_fn,
nullptr, inferred_return_shapes);
}
}
} | #include "tensorflow/core/ir/utils/shape_inference_utils.h"
#include <vector>
#include "absl/status/status.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/ir/tf_op_wrapper.h"
#include "tensorflow/core/platform/test.h"
using tensorflow::shape_inference::DimensionHandle;
using tensorflow::shape_inference::InferenceContext;
using tensorflow::shape_inference::ShapeHandle;
namespace mlir {
namespace tfg {
namespace {
const char *const code = R"mlir(
tfg.func @test(%arg : tensor<32x?x256x4xi32> {tfg.name = "arg"}, %arg_1 : tensor<*xi32> {tfg.name = "arg1", tf._output_shapes = [5 : i32]}) -> (tensor<2x2xf32>) {
%Placeholder, %ctl = Placeholder name("placeholder") {dtype = f32, shape = #tf_type.shape<>} : () -> (tensor<f32>)
%Const, %ctl_0 = Const name("c0") {dtype = f32, value = dense<1.000000e+00> : tensor<2x2xf32>} : () -> (tensor<2x2xf32>)
%Const_1, %ctl_2 = Const name("c1") {dtype = f32, value = dense<2.000000e+00> : tensor<2x2xf32>} : () -> (tensor<2x2xf32>)
%IdentityN:3, %ctl_3 = IdentityN(%Const, %Placeholder, %Const_1) name("id_n") {T = [f32, f32, f32]} : (tensor<2x2xf32>, tensor<f32>, tensor<2x2xf32>) -> (tensor<2x2xf32>, tensor<f32>, tensor<2x2xf32>)
%Identity, %ctl_6 = Identity(%IdentityN#1) name("id1") {T = f32} : (tensor<f32>) -> (tensor<f32>)
%Add, %ctl_7 = Add(%Const, %IdentityN#1) name("add") {T = f32} : (tensor<2x2xf32>, tensor<f32>) -> (tensor<2x2xf32>)
%Const_1000, %ctl_9 = Const name("c1000") {dtype = i32, value = dense<1000> : tensor<i32>} : () -> (tensor<i32>)
%Const_2, %ctl_10 = Const name("c2") {dtype = i32, value = dense<0> : tensor<i32>} : () -> (tensor<i32>)
%Const_3, %ctl_11 = Const name("c3") {dtype = i32, value = dense<1> : tensor<i32>} : () -> (tensor<i32>)
%Range, %ctl_range = Range(%Const_2, %Const_1000, %Const_3) name("range") {Tidx = i32} : (tensor<i32>, tensor<i32>, tensor<i32>) -> tensor<1000xi32>
%Const_4, %ctl_12 = Const name("c4") {dtype = i32, value = dense<[32, -1, 4]> : tensor<3xi32>} : () -> (tensor<3xi32>)
%Reshape, %ctl_13 = Reshape(%arg, %Const_4) name("reshape") {T = i32} : (tensor<32x?x256x4xi32>, tensor<3xi32>) -> tensor<32x?x4xi32>
%Const_5, %ctl_14 = Const name("TensorListReserve/num_elements") {dtype = i32, value = dense<3> : tensor<i32>} : () -> (tensor<i32>)
%Const_6, %ctl_15 = Const name("TensorListReserve/element_shape") {dtype = i32, value = dense<2> : tensor<2xi32>} : () -> (tensor<2xi32>)
%TensorListReserve, %ctl_16 = TensorListReserve(%Const_6, %Const_5) name("TensorListReserve") {element_dtype = f32, shape_type = i32} : (tensor<2xi32>, tensor<i32>) -> (tensor<!tf_type.variant<tensor<2x2xf32>>>)
%Const_7, %ctl_17 = Const name("index") {dtype = i32, value = dense<0> : tensor<i32>} : () -> (tensor<i32>)
%Const_8, %ctl_18 = Const name("item") {dtype = f32, value = dense<[[1.000000e+00, 2.000000e+00], [3.000000e+00, 4.000000e+00]]> : tensor<2x2xf32>} : () -> (tensor<2x2xf32>)
%TensorListSetItem, %ctl_19 = TensorListSetItem(%TensorListReserve, %Const_7, %Const_8) name("TensorListSetItem") {element_dtype = f32} : (tensor<!tf_type.variant<tensor<2x2xf32>>>, tensor<i32>, tensor<2x2xf32>) -> (tensor<!tf_type.variant<tensor<2x2xf32>>>)
%Identity_1, %ctl_20 = Identity(%arg_1) name("id2") {T = i32} : (tensor<*xi32>) -> (tensor<*xi32>)
return (%Const_1) : tensor<2x2xf32>
}
)mlir";
}
class ShapeInferenceTest : public ::testing::Test {
protected:
using OpShapeInfo = SmallVector<ShapedTypeComponents>;
ShapeInferenceTest() {
context_.getOrLoadDialect<tfg::TFGraphDialect>();
module_ = mlir::parseSourceString<mlir::ModuleOp>(code, &context_);
assert(module_);
}
template <typename OpRange>
void VerifyInferredShapes(OpRange &&ops,
SmallVector<OpShapeInfo> &inferred_result,
bool check_type) {
for (auto it : llvm::zip(ops, inferred_result)) {
Operation &op = std::get<0>(it);
OpShapeInfo &info = std::get<1>(it);
EXPECT_EQ(op.getNumResults() - 1, info.size());
for (int i = 0; i < op.getNumResults() - 1; ++i) {
ShapedType shape = mlir::cast<ShapedType>(op.getResultTypes()[i]);
EXPECT_EQ(shape.hasRank(), info[i].hasRank());
if (shape.hasRank()) EXPECT_EQ(shape.getShape(), info[i].getDims());
if (check_type)
EXPECT_EQ(shape.getElementType(), info[i].getElementType());
}
}
}
ModuleOp GetModule() { return module_.get(); }
MLIRContext *GetContext() { return &context_; }
private:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
};
TEST_F(ShapeInferenceTest, TestShapeAndTypeInference) {
auto operand_as_constant_fn = [](Value operand) -> Attribute {
return operand.getDefiningOp()->getAttr("value");
};
auto op_result_as_shape_fn = [](InferenceContext &ic,
OpResult op_result) -> ShapeHandle {
auto rt = mlir::dyn_cast<RankedTensorType>(op_result.getType());
if (!rt || rt.getRank() != 1 || !rt.hasStaticShape()) return {};
std::vector<DimensionHandle> dims(rt.getDimSize(0), ic.UnknownDim());
auto attr =
op_result.getDefiningOp()->getAttrOfType<DenseElementsAttr>("value");
for (auto element : llvm::enumerate(attr.getValues<APInt>()))
dims[element.index()] = ic.MakeDim(element.value().getSExtValue());
return ic.MakeShape(dims);
};
GraphFuncOp func = GetModule().lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
Block &block = *func.getBody().begin();
SmallVector<SmallVector<ShapedTypeComponents>> all_results;
for (Operation &op : block.without_terminator()) {
auto result_element_type_fn = [&](int idx) -> Type {
return mlir::cast<ShapedType>(op.getResult(idx).getType())
.getElementType();
};
SmallVector<ShapedTypeComponents> results;
EXPECT_TRUE(InferReturnTypeComponentsForTFOp(
op.getLoc(), &op, TFOp(&op).getNonControlOperands(),
1010, operand_as_constant_fn,
op_result_as_shape_fn, result_element_type_fn,
nullptr, results)
.succeeded());
all_results.push_back(results);
}
VerifyInferredShapes(func.getBody().begin()->without_terminator(),
all_results,
true);
auto exclude_reshape_operand_as_constant_fn =
[&](Value operand) -> Attribute {
Operation *defining_op = operand.getDefiningOp();
if (!defining_op || defining_op->getName().getStringRef() == "tfg.Reshape")
return BoolAttr::get(GetContext(), false);
return operand.getDefiningOp()->getAttr("value");
};
all_results.clear();
for (Operation &op : block.without_terminator()) {
auto result_element_type_fn = [&](int idx) -> Type {
return mlir::cast<ShapedType>(op.getResult(idx).getType())
.getElementType();
};
SmallVector<ShapedTypeComponents> results;
EXPECT_TRUE(InferReturnTypeComponentsForTFOp(
op.getLoc(), &op, TFOp(&op).getNonControlOperands(),
1010,
exclude_reshape_operand_as_constant_fn,
op_result_as_shape_fn, result_element_type_fn,
nullptr, results)
.succeeded());
all_results.push_back(results);
}
VerifyInferredShapes(func.getBody().begin()->without_terminator(),
all_results,
true);
}
TEST_F(ShapeInferenceTest, TestInferenceFailure) {
auto operand_as_constant_fn = [](Value operand) -> Attribute {
return nullptr;
};
auto op_result_as_shape_fn = [](InferenceContext &ic,
OpResult op_result) -> ShapeHandle {
return {};
};
auto result_element_type_fn = [](int idx) -> Type { return nullptr; };
GraphFuncOp func = GetModule().lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
Block &block = *func.getBody().begin();
SmallVector<SmallVector<ShapedTypeComponents>> all_results;
auto get_empty_attr_values_fn =
[](Operation *, llvm::StringRef, const tensorflow::OpRegistrationData *,
bool, tensorflow::AttrValueMap *) { return absl::OkStatus(); };
for (Operation &op : block.without_terminator()) {
SmallVector<ShapedTypeComponents> results;
auto result = InferReturnTypeComponentsForTFOp(
op.getLoc(), &op, TFOp(&op).getNonControlOperands(),
1010, operand_as_constant_fn, op_result_as_shape_fn,
result_element_type_fn, get_empty_attr_values_fn, results);
if (op.getName().getStringRef() == "tfg.Const" ||
op.getName().getStringRef() == "tfg.IdentityN" ||
op.getName().getStringRef() == "tfg.PlaceHolder" ||
op.getName().getStringRef() == "tfg.Range")
EXPECT_TRUE(failed(result));
}
auto error_attr_values_fn = [](Operation *, llvm::StringRef,
const tensorflow::OpRegistrationData *, bool,
tensorflow::AttrValueMap *) {
return tensorflow::errors::Unknown("Intended error");
};
for (Operation &op : block.without_terminator()) {
SmallVector<ShapedTypeComponents> results;
EXPECT_FALSE(InferReturnTypeComponentsForTFOp(
op.getLoc(), &op, TFOp(&op).getNonControlOperands(),
1010, operand_as_constant_fn,
op_result_as_shape_fn, result_element_type_fn,
error_attr_values_fn, results)
.succeeded());
}
}
}
} |
1,238 | cpp | tensorflow/tensorflow | cluster_util | tensorflow/compiler/mlir/tensorflow/utils/cluster_util.cc | tensorflow/compiler/mlir/tensorflow/utils/cluster_util_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_CLUSTER_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_CLUSTER_UTIL_H_
#include <functional>
#include <string>
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/Block.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h"
namespace mlir::TF {
struct Cluster {
llvm::SetVector<Operation*> ops;
std::string target;
};
llvm::StringMap<SmallVector<Cluster>> BuildAllClusters(
Block& block, const TF::SideEffectAnalysis::Info& side_effect_analysis,
std::function<std::string(Operation*)> get_target,
std::function<bool(Operation*)> is_ignored_op);
void ReorderOpResultUses(mlir::Operation* cluster);
}
#endif
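// Editor's note (hedged usage sketch; it assumes, as in the unit test below,
// that clusterable ops carry a string `device` attribute naming their
// target):
//
//   TF::SideEffectAnalysis analysis(module);
//   auto main_func = module.lookupSymbol<func::FuncOp>("main");
//   auto clusters = BuildAllClusters(
//       main_func.front(), analysis.GetAnalysisForFunc(main_func),
//       /*get_target=*/
//       [](Operation *op) {
//         auto attr = op->getAttrOfType<StringAttr>("device");
//         return attr ? attr.getValue().str() : "";
//       },
//       /*is_ignored_op=*/
//       [](Operation *op) { return !op->hasAttr("device"); });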
#include "tensorflow/compiler/mlir/tensorflow/utils/cluster_util.h"
#include <algorithm>
#include <functional>
#include <string>
#include <vector>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"
#include "tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h"
namespace mlir::TF {
namespace {
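// Returns all ops in the cluster's block that transitively depend on a value
// produced by cluster `c`, widened to include every member of any other
// cluster those dependent ops belong to. Merging an op whose operands come
// from this set would create a cycle between clusters.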
llvm::SetVector<Operation*> GetAllOpsDependOnCluster(
const Cluster& c,
const llvm::DenseMap<Operation*, Cluster*>& op_to_cluster_map) {
llvm::SetVector<Operation*> ops_depend_on_cluster;
for (Operation& op : *c.ops.front()->getBlock()) {
if (op.isBeforeInBlock(c.ops.front()) || c.ops.contains(&op)) {
continue;
}
llvm::SetVector<Value> live_ins(op.operand_begin(), op.operand_end());
getUsedValuesDefinedAbove(op.getRegions(), live_ins);
if (llvm::any_of(live_ins, [&](Value value) {
Operation* defining_op = value.getDefiningOp();
if (!defining_op) {
return false;
}
return c.ops.contains(defining_op) ||
ops_depend_on_cluster.contains(defining_op);
})) {
ops_depend_on_cluster.insert(&op);
}
}
llvm::SetVector<Operation*> same_cluster_ops_with_dependency(
ops_depend_on_cluster.begin(), ops_depend_on_cluster.end());
for (Operation* op : ops_depend_on_cluster) {
Cluster* cluster = op_to_cluster_map.lookup(op);
if (cluster == nullptr) {
continue;
}
for (Operation* ops_in_same_cluster : cluster->ops) {
same_cluster_ops_with_dependency.insert(ops_in_same_cluster);
}
}
return same_cluster_ops_with_dependency;
}
bool CanMergeIntoCluster(
const Cluster& c, Operation* to_merge,
const TF::SideEffectAnalysis::Info& side_effect_analysis,
std::function<std::string(Operation*)> get_target,
const llvm::DenseMap<Operation*, Cluster*>& op_to_cluster_map) {
const bool has_control_predecessors_after_cluster =
!side_effect_analysis
.DirectControlPredecessors(
to_merge,
[&c](Operation* pred) {
Operation* const last_c_op = c.ops.back();
return last_c_op->getBlock() == pred->getBlock() &&
last_c_op->isBeforeInBlock(pred);
})
.empty();
if (has_control_predecessors_after_cluster) {
return false;
}
llvm::SetVector<Operation*> ops_depend_on_cluster =
GetAllOpsDependOnCluster(c, op_to_cluster_map);
return llvm::none_of(to_merge->getOperands(), [&](Value value) {
Operation* defining_op = value.getDefiningOp();
return defining_op && ops_depend_on_cluster.contains(defining_op);
});
}
}
llvm::StringMap<SmallVector<Cluster>> BuildAllClusters(
Block& block, const TF::SideEffectAnalysis::Info& side_effect_analysis,
std::function<std::string(Operation*)> get_target,
std::function<bool(Operation*)> is_ignored_op) {
llvm::StringMap<SmallVector<Cluster>> all_clusters;
llvm::DenseMap<Operation*, Cluster*> op_to_cluster_map;
llvm::StringMap<Cluster> nearest_clusters;
for (Operation& op : llvm::make_early_inc_range(block)) {
if (is_ignored_op(&op)) {
continue;
}
std::string target_name = get_target(&op);
auto it = nearest_clusters.find(target_name);
if (it == nearest_clusters.end()) {
SetVector<Operation*> new_cluster_op_set;
new_cluster_op_set.insert(&op);
nearest_clusters[target_name] = Cluster{new_cluster_op_set, target_name};
op_to_cluster_map[&op] = &nearest_clusters[target_name];
continue;
}
Cluster& nearest_cluster = it->second;
if (CanMergeIntoCluster(nearest_cluster, &op, side_effect_analysis,
get_target, op_to_cluster_map)) {
nearest_cluster.ops.insert(&op);
op_to_cluster_map[&op] = &nearest_cluster;
continue;
}
all_clusters[target_name].push_back(nearest_cluster);
SetVector<Operation*> new_cluster_op_set;
new_cluster_op_set.insert(&op);
nearest_clusters[target_name] = Cluster{new_cluster_op_set, target_name};
op_to_cluster_map[&op] = &nearest_clusters[target_name];
}
for (auto& target_cluster : nearest_clusters) {
all_clusters[target_cluster.first()].push_back(target_cluster.second);
}
return all_clusters;
}
void ReorderOpResultUses(mlir::Operation* cluster) {
mlir::Block* const cluster_block = cluster->getBlock();
llvm::SetVector<mlir::Operation*> ops_to_reorder;
llvm::SmallVector<mlir::Value> worklist;
llvm::append_range(worklist, cluster->getResults());
while (!worklist.empty()) {
mlir::Value value = worklist.back();
worklist.pop_back();
for (mlir::Operation* const user : value.getUsers()) {
mlir::Operation* const op = cluster_block->findAncestorOpInBlock(*user);
if (op == nullptr || !op->isBeforeInBlock(cluster)) {
continue;
}
if (ops_to_reorder.insert(op)) {
llvm::append_range(worklist, op->getResults());
}
}
}
llvm::SmallVector<mlir::Operation*, 0> sorted = ops_to_reorder.takeVector();
llvm::sort(sorted, [](mlir::Operation* lhs, mlir::Operation* rhs) {
return lhs->isBeforeInBlock(rhs);
});
for (mlir::Operation* const op : llvm::reverse(sorted)) {
op->moveAfter(cluster);
}
}
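// Note on correctness: the users are re-inserted after `cluster` in their
// original relative order (reverse iteration over the sorted list), so every
// moved op still comes after all of its producers.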
} | #include "tensorflow/compiler/mlir/tensorflow/utils/cluster_util.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace mlir::TF {
namespace {
constexpr StringRef kTestClusterName = "tpu0";
absl::StatusOr<OwningOpRef<ModuleOp>> GetMlirModuleFromString(
StringRef string, MLIRContext* context) {
DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
OwningOpRef<ModuleOp> mlir_module;
auto status =
tensorflow::DeserializeMlirModule(string, context, &mlir_module);
if (!status.ok()) {
return status;
}
return mlir_module;
}
std::string GetDevice(Operation* op) {
auto device_attr = op->getAttrOfType<StringAttr>("device");
return device_attr ? device_attr.getValue().str() : "";
}
bool CanBeIgnoredInCluster(Operation* op) {
auto device_attr = op->getAttrOfType<StringAttr>("device");
return !device_attr || device_attr.getValue().empty();
}
llvm::StringMap<SmallVector<Cluster>> GetClusters(ModuleOp module) {
TF::SideEffectAnalysis side_effect_analysis(module);
auto main_func = module.lookupSymbol<func::FuncOp>("main");
const TF::SideEffectAnalysis::Info& info =
side_effect_analysis.GetAnalysisForFunc(main_func);
llvm::StringMap<SmallVector<Cluster>> clusters = BuildAllClusters(
main_func.front(), info, GetDevice, CanBeIgnoredInCluster);
return clusters;
}
TEST(BuildClusters, TestSingleCluster) {
static const char* const module_with_single_cluster =
R"(module {
func.func @main(%arg0: tensor<?xi32>) -> tensor<?xi32> {
%0 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
%1 = "tf.B"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%2 = "tf.C"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.D"(%2) : (tensor<?xi32>) -> tensor<?xi32>
func.return %3 : tensor<?xi32>
}
}
)";
MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(
OwningOpRef<ModuleOp> module,
GetMlirModuleFromString(module_with_single_cluster, &context));
auto clusters = GetClusters(module.get());
EXPECT_EQ(clusters.count(kTestClusterName), 1);
EXPECT_EQ(clusters.lookup(kTestClusterName).size(), 1);
EXPECT_EQ(clusters.lookup(kTestClusterName)[0].ops.size(), 2);
}
TEST(BuildClusters, TestMultipleClusters) {
static const char* const module_with_two_clusters =
R"(module {
func.func @main(%arg0: tensor<?xi32>) -> tensor<?xi32> {
%0 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
%1 = "tf.B"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%2 = "tf.C"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.D"(%2) : (tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.E"(%3) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%5 = "tf.F"(%3, %4) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
func.return %5 : tensor<?xi32>
}
}
)";
MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(
OwningOpRef<ModuleOp> module,
GetMlirModuleFromString(module_with_two_clusters, &context));
auto clusters = GetClusters(module.get());
EXPECT_EQ(clusters.count(kTestClusterName), 1);
EXPECT_EQ(clusters[kTestClusterName].size(), 2);
EXPECT_EQ(clusters[kTestClusterName][0].ops.size(), 2);
EXPECT_EQ(clusters[kTestClusterName][1].ops.size(), 2);
}
TEST(BuildClusters, TestMultipleTargets) {
static const char* const module_with_two_clusters =
R"(module {
func.func @main(%arg0: tensor<?xi32>) -> tensor<?xi32> {
%0 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
%1 = "tf.B"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%2 = "tf.C"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.D"(%2) : (tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.E"(%3) {device = "tpu1"} : (tensor<?xi32>) -> tensor<?xi32>
%5 = "tf.F"(%3, %4) {device = "tpu1"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
func.return %5 : tensor<?xi32>
}
}
)";
MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(
OwningOpRef<ModuleOp> module,
GetMlirModuleFromString(module_with_two_clusters, &context));
auto clusters = GetClusters(module.get());
constexpr StringRef kTarget0 = "tpu0";
EXPECT_EQ(clusters.count(kTarget0), 1);
EXPECT_EQ(clusters[kTarget0].size(), 1);
EXPECT_EQ(clusters[kTarget0][0].ops.size(), 2);
constexpr StringRef kTarget1 = "tpu1";
EXPECT_EQ(clusters.count(kTarget1), 1);
EXPECT_EQ(clusters[kTarget1].size(), 1);
EXPECT_EQ(clusters[kTarget1][0].ops.size(), 2);
}
TEST(BuildClusters, TestMergedClusters) {
static const char* const module_with_single_cluster =
R"(module {
func.func @main(%arg0: tensor<?xi32>) -> (tensor<?xi32>, tensor<?xi32>) {
%0 = "tf.Relu"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
%1 = "tf.Relu"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%2 = "tf.Add"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.Relu"(%2) : (tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.Relu"(%1) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%5 = "tf.Add"(%1, %2) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
func.return %3, %5 : tensor<?xi32>, tensor<?xi32>
}
}
)";
MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(
OwningOpRef<ModuleOp> module,
GetMlirModuleFromString(module_with_single_cluster, &context));
auto clusters = GetClusters(module.get());
EXPECT_EQ(clusters.count(kTestClusterName), 1);
EXPECT_EQ(clusters[kTestClusterName].size(), 1);
EXPECT_EQ(clusters[kTestClusterName][0].ops.size(), 4);
}
TEST(BuildClusters, TestMergedClustersWithDataDependency) {
static const char* const module_with_single_cluster =
R"(module {
func.func @main(%arg0: tensor<?xi32>, %arg1: tensor<?xi32>) -> (tensor<?xi32>, tensor<?xi32>) {
%0 = "tf.Relu"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
%1 = "tf.Relu"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%2 = "tf.Add"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.Relu"(%arg1) {device = "tpu1"} : (tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.Add"(%3, %arg1) {device = "tpu1"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%5 = "tf.Relu"(%4) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%6 = "tf.Add"(%4, %5) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
func.return %3, %5 : tensor<?xi32>, tensor<?xi32>
}
}
)";
MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(
OwningOpRef<ModuleOp> module,
GetMlirModuleFromString(module_with_single_cluster, &context));
auto clusters = GetClusters(module.get());
EXPECT_EQ(clusters.count(kTestClusterName), 1);
EXPECT_EQ(clusters[kTestClusterName].size(), 1);
EXPECT_EQ(clusters[kTestClusterName][0].ops.size(), 4);
}
}
} |
1,239 | cpp | tensorflow/tensorflow | tf_saved_model | tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc | tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_IR_TF_SAVED_MODEL_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_IR_TF_SAVED_MODEL_H_
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/OpDefinition.h"
namespace mlir {
namespace tf_saved_model {
inline constexpr StringRef kTfSavedModelExportedNamesAttr =
"tf_saved_model.exported_names";
inline constexpr StringRef kTfSavedModelIndexPathAttr =
"tf_saved_model.index_path";
inline constexpr StringRef kTfSavedModelInitializerTypeAttr =
"tf_saved_model.initializer_type";
inline constexpr StringRef kTfSavedModelInitializerRestoreType = "restore_op";
inline constexpr StringRef kTfSavedModelInitializerInitType = "init_op";
class TensorFlowSavedModelDialect : public Dialect {
public:
explicit TensorFlowSavedModelDialect(MLIRContext *context);
LogicalResult verifyRegionArgAttribute(Operation *op, unsigned region_index,
unsigned arg_index,
NamedAttribute named_attr) override;
LogicalResult verifyRegionResultAttribute(Operation *op,
unsigned region_index,
unsigned result_index,
NamedAttribute named_attr) override;
LogicalResult verifyOperationAttribute(Operation *op,
NamedAttribute named_attr) override;
static StringRef getDialectNamespace() { return "tf_saved_model"; }
};
}
}
#define GET_OP_CLASSES
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h.inc"
namespace mlir {
namespace tf_saved_model {
SmallVector<StringRef, 2> GetExportedNames(Operation *op);
bool IsExported(Operation *op);
bool HasTfSavedModelSemantics(ModuleOp module_op);
Operation *LookupBoundInput(func::FuncOp func, int arg_index,
const SymbolTable &symbol_table);
template <typename T>
T LookupBoundInputOfType(func::FuncOp func, int arg_index,
const SymbolTable &symbol_table) {
return llvm::dyn_cast_or_null<T>(
LookupBoundInput(func, arg_index, symbol_table));
}
Type GetBoundInputArgTypeFor(mlir::Operation *op);
SessionInitializerOp GetSessionInitializerOp(ModuleOp module_op);
SmallVector<StringRef, 2> GetSessionInitializerExportedName(ModuleOp module_op);
SmallVector<func::FuncOp, 2> GetInitializerFunctions(ModuleOp module_op);
func::FuncOp GetInitializerFunction(ModuleOp module_op,
StringRef initializer_type);
bool IsRestoreGraph(ModuleOp module);
}
}
#endif
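// Editor's note: the dialect below hooks MLIR's attribute verification.
// verifyOperationAttribute handles op-level attributes such as
// tf_saved_model.exported_names and tf_saved_model.semantics, while
// verifyRegionArgAttribute and verifyRegionResultAttribute handle
// per-argument attributes such as tf_saved_model.bound_input and
// tf_saved_model.index_path.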
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include <algorithm>
#include "absl/algorithm/container.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
namespace mlir {
namespace tf_saved_model {
static bool IsStrArrayAttr(Attribute attr) {
auto array = mlir::dyn_cast<ArrayAttr>(attr);
if (!array) return false;
return llvm::all_of(
array, [](Attribute attr) { return mlir::isa<StringAttr>(attr); });
}
LogicalResult VerifyTensorTypesCompatible(Type t1, Type t2) {
if (!mlir::isa<TensorType>(t1) || !mlir::isa<TensorType>(t2)) {
return failure();
}
return verifyCompatibleShape(mlir::cast<TensorType>(t1),
mlir::cast<TensorType>(t2));
}
LogicalResult GlobalTensorOp::verify() {
GlobalTensorOp global_tensor = *this;
if (global_tensor.getValue()) {
if (failed(VerifyTensorTypesCompatible(
global_tensor.getType(), global_tensor.getValue()->getType()))) {
return global_tensor.emitError()
<< "'type' and 'value' attributes should "
"have compatible tensor types";
}
}
if (!global_tensor.getIsMutable()) {
if (!mlir::cast<TensorType>(global_tensor.getType()).hasStaticShape()) {
return global_tensor.emitError()
<< "'type' attribute for immutable 'tf_saved_model.global_tensor' "
"should have a static shape";
}
}
return success();
}
LogicalResult SessionInitializerOp::verify() {
SessionInitializerOp session_initializer = *this;
mlir::SymbolTable symbol_table(
session_initializer->getParentOfType<ModuleOp>());
for (auto sym_ref : session_initializer.getInitializers()) {
auto init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
mlir::cast<FlatSymbolRefAttr>(sym_ref).getValue());
if (!init_func_op)
return session_initializer.emitOpError()
<< "the initializer function does not exist";
if (!init_func_op.getFunctionType().getResults().empty())
return session_initializer.emitOpError()
<< "the initializer function should have no output";
auto exported_names = GetExportedNames(init_func_op);
if (exported_names.empty())
return session_initializer.emitOpError()
<< "the initializer function should be exported";
if (exported_names.size() != 1)
return session_initializer.emitOpError()
<< "the initializer function should have only one exported names";
}
return success();
}
}
}
#define GET_OP_CLASSES
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc.inc"
namespace mlir {
namespace tf_saved_model {
TensorFlowSavedModelDialect::TensorFlowSavedModelDialect(MLIRContext *context)
: Dialect("tf_saved_model", context,
TypeID::get<TensorFlowSavedModelDialect>()) {
context->loadDialect<TF::TensorFlowDialect>();
addOperations<
#define GET_OP_LIST
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc.inc"
>();
}
static LogicalResult VerifyIndexPath(Operation *op, NamedAttribute named_attr) {
auto attr = mlir::dyn_cast<ArrayAttr>(named_attr.getValue());
if (!attr) {
return op->emitError() << "'" << kTfSavedModelIndexPathAttr
<< "' attribute should be an ArrayAttr";
}
for (auto element : attr) {
if (mlir::isa<StringAttr>(element)) {
continue;
}
if (auto integer = mlir::dyn_cast<IntegerAttr>(element)) {
if (integer.getValue().getBitWidth() == 64) {
continue;
}
}
return op->emitError() << "'" << kTfSavedModelIndexPathAttr
<< "' elements should be strings or 64-bit integers";
}
return mlir::success();
}
Type GetBoundInputArgTypeFor(mlir::Operation *op) {
if (auto global_tensor = llvm::dyn_cast<GlobalTensorOp>(op)) {
auto type = mlir::cast<TensorType>(global_tensor.getType());
return RankedTensorType::get(
{}, TF::ResourceType::get({type}, type.getContext()));
}
if (auto asset = llvm::dyn_cast<AssetOp>(op)) {
return RankedTensorType::get({}, TF::StringType::get(asset.getContext()));
}
op->emitError() << "unknown symbol operation";
return {};
}
static LogicalResult VerifyBoundInputArgType(Operation *op_for_diagnostics,
Type arg_type,
mlir::Operation *symbol_op) {
auto expected_type = GetBoundInputArgTypeFor(symbol_op);
if (!expected_type) return failure();
if (arg_type != expected_type) {
return op_for_diagnostics->emitError()
<< "bound input with type " << arg_type << " expected to have type "
<< expected_type;
}
return success();
}
LogicalResult TensorFlowSavedModelDialect::verifyRegionArgAttribute(
Operation *op, unsigned region_index, unsigned arg_index,
NamedAttribute named_attr) {
if (named_attr.getName() == "tf_saved_model.bound_input") {
if (!mlir::isa<FlatSymbolRefAttr>(named_attr.getValue())) {
return op->emitError() << "'tf_saved_model.bound_input' attribute should "
"be a FlatSymbolRefAttr";
}
auto symbol_name =
mlir::cast<FlatSymbolRefAttr>(named_attr.getValue()).getValue();
auto module = op->getParentOfType<ModuleOp>();
mlir::Operation *symbol_op = module.lookupSymbol(symbol_name);
if (!symbol_op) {
return op->emitError() << "'tf_saved_model.bound_input' attribute must "
"reference a valid symbol, got invalid symbol '"
<< symbol_name << "'";
}
auto arg_type = cast<func::FuncOp>(op).getArgument(arg_index).getType();
return VerifyBoundInputArgType(op, arg_type, symbol_op);
}
if (named_attr.getName() == kTfSavedModelIndexPathAttr) {
return VerifyIndexPath(op, named_attr);
}
return op->emitError() << "unknown tf_saved_model dialect arg attribute '"
<< named_attr.getName().getValue() << "'";
}
LogicalResult TensorFlowSavedModelDialect::verifyRegionResultAttribute(
Operation *op, unsigned region_index, unsigned result_index,
NamedAttribute named_attr) {
if (named_attr.getName() == kTfSavedModelIndexPathAttr) {
return VerifyIndexPath(op, named_attr);
}
return op->emitError() << "unknown tf_saved_model dialect result attribute '"
<< named_attr.getName().getValue() << "'";
}
LogicalResult VerifySessionInitOp(SessionInitializerOp session_init_op,
SymbolTable &symbol_table) {
llvm::SmallDenseSet<StringAttr> init_types{};
for (auto init_sym :
session_init_op.getInitializers().getAsValueRange<FlatSymbolRefAttr>()) {
auto init_func = symbol_table.lookup<func::FuncOp>(init_sym);
if (!init_func) continue;
auto init_type =
init_func->getAttrOfType<StringAttr>(kTfSavedModelInitializerTypeAttr);
if (!init_type) continue;
if (init_types.contains(init_type)) {
return init_func->emitError()
<< "Attribute tf_saved_model.initializer_type should not have "
"duplicate values. Found duplicate: "
<< init_type;
}
init_types.insert(init_type);
}
return success();
}
static bool HasAnyTfSavedModelArgAttr(func::FuncOp func) {
for (int i = 0, e = func.getNumArguments(); i < e; i++) {
if (func.getArgAttr(i, kTfSavedModelIndexPathAttr) ||
func.getArgAttr(i, "tf_saved_model.bound_input")) {
return true;
}
}
for (int i = 0, e = func.getNumResults(); i < e; i++) {
if (func.getResultAttr(i, kTfSavedModelIndexPathAttr) ||
func.getResultAttr(i, "tf_saved_model.bound_input")) {
return true;
}
}
return false;
}
static LogicalResult VerifySavedModelModule(
ModuleOp module, TensorFlowSavedModelDialect *dialect) {
auto exported_names_ident =
StringAttr::get(dialect->getContext(), kTfSavedModelExportedNamesAttr);
DenseMap<StringRef, Operation *> exported_name_to_op;
for (auto &op : module) {
auto attr = op.getAttr(exported_names_ident);
if (!attr) continue;
if (failed(dialect->verifyOperationAttribute(
&op, {exported_names_ident, attr}))) {
return failure();
}
for (auto str : mlir::cast<ArrayAttr>(attr)) {
auto exported_name = mlir::cast<StringAttr>(str).getValue();
auto p = exported_name_to_op.insert({exported_name, &op});
if (!p.second) {
return op.emitError()
.append("duplicate exported name '", exported_name, "'")
.attachNote(p.first->getSecond()->getLoc())
.append("previously seen here");
}
}
}
for (auto func : module.getOps<func::FuncOp>()) {
const bool is_exported = IsExported(func);
if (is_exported && !func.isPublic()) {
return func.emitError()
<< "exported function @" << func.getName() << " should be public";
}
if (!is_exported && func.isPublic()) {
return func.emitError() << "non-exported function @" << func.getName()
<< " should be private";
}
if (!is_exported && HasAnyTfSavedModelArgAttr(func)) {
return func.emitError() << "can only apply 'tf_saved_model' argument "
"attributes to exported functions";
}
}
SymbolTable symbol_table(module);
auto session_initializers = module.getOps<SessionInitializerOp>();
if (!session_initializers.empty()) {
if (!llvm::hasSingleElement(session_initializers)) {
return (*++session_initializers.begin()).emitError()
<< "there must be no more than one session_initializer op";
}
if (failed(
VerifySessionInitOp(*session_initializers.begin(), symbol_table))) {
return failure();
}
}
auto is_init = [&session_initializers](mlir::func::FuncOp func) {
if (session_initializers.empty()) return false;
auto init_syms = (*session_initializers.begin()).getInitializers();
return std::any_of(
init_syms.begin(), init_syms.end(), [&](Attribute sym_ref) {
return mlir::cast<FlatSymbolRefAttr>(sym_ref).getValue() ==
func.getName();
});
};
auto symbol_uses = SymbolTable::getSymbolUses(&module.getBodyRegion());
if (!symbol_uses.has_value()) {
return module.emitError() << "modules with 'tf_saved_model.semantics' must "
"have analyzable symbol uses";
}
for (auto symbol_use : *symbol_uses) {
auto func = symbol_table.lookupNearestSymbolFrom<func::FuncOp>(
symbol_use.getUser(), symbol_use.getSymbolRef());
if (func && IsExported(func)) {
if (is_init(func) &&
llvm::isa<SessionInitializerOp>(symbol_use.getUser())) {
if (!func->getAttr(kTfSavedModelInitializerTypeAttr)) {
LOG(WARNING)
<< "func op in session_initializer op's initializers attribute "
<< "should have tf_saved_model.initializer_type attribute.";
}
continue;
}
return symbol_use.getUser()
->emitError("exported function cannot be internally referenced")
.attachNote(func.getLoc())
.append("references this exported function");
}
}
return success();
}
LogicalResult VerifyExportedFunc(func::FuncOp func) {
bool reached_bound_inputs = false;
auto module = func->getParentOfType<ModuleOp>();
for (int i = 0, e = func.getNumArguments(); i < e; i++) {
if (func.getArgAttr(i, "tf_saved_model.bound_input")) {
reached_bound_inputs = true;
continue;
}
if (func.getArgAttr(i, kTfSavedModelIndexPathAttr)) {
if (reached_bound_inputs) {
return func.emitError()
<< "all '" << kTfSavedModelIndexPathAttr
<< "' arg attributes should precede all "
"'tf_saved_model.bound_input' arg attributes";
}
continue;
}
if (func.getArgAttr(i, "tf.resource_name")) {
if (module->getAttr("tf_saved_model.under_construction")) continue;
return func.emitError() << "'tf.resource_name' attribute is not allowed "
"unless it is being under construction";
}
return func.emitError()
<< "all arguments should have '" << kTfSavedModelIndexPathAttr
<< "', 'tf_saved_model.bound_input' or 'tf.resource_name' "
"attributes";
}
llvm::SmallDenseSet<StringRef, 8> unique_bound_inputs;
for (int i = 0, e = func.getNumArguments(); i < e; i++) {
if (auto attr = func.getArgAttrOfType<FlatSymbolRefAttr>(
i, "tf_saved_model.bound_input")) {
if (!unique_bound_inputs.insert(attr.getValue()).second) {
if (module->getAttr("tf_saved_model.under_construction")) continue;
return func.emitError()
<< "duplicate 'tf_saved_model.bound_input' binding";
}
}
}
for (int i = 0, e = func.getNumResults(); i < e; i++) {
if (!func.getResultAttr(i, kTfSavedModelIndexPathAttr)) {
return func.emitError() << "all results should have '"
<< kTfSavedModelIndexPathAttr << "' attributes";
}
}
return success();
}
bool IsValidInitializerType(StringRef initializer_type) {
return initializer_type == kTfSavedModelInitializerRestoreType ||
initializer_type == kTfSavedModelInitializerInitType;
}
LogicalResult VerifyInitializerTypeAttr(Operation *op,
const NamedAttribute named_attr) {
if (!isa<func::FuncOp>(op)) {
return op->emitError() << "Attribute tf_saved_model.initializer_type "
<< "should be on a func::FuncOp.";
}
auto initializer_type_attr_value =
mlir::dyn_cast_or_null<StringAttr>(named_attr.getValue());
if (!initializer_type_attr_value) {
return op->emitError() << "Attribute tf_saved_model.initializer_type "
<< "should be a StringAttr.";
}
if (!IsValidInitializerType(initializer_type_attr_value)) {
return op->emitError() << "tf_saved_model.initializer_type should be one "
"of 'restore_op' or 'init_op'. Got: "
<< initializer_type_attr_value.str();
}
return success();
}
LogicalResult TensorFlowSavedModelDialect::verifyOperationAttribute(
Operation *op, NamedAttribute named_attr) {
if (named_attr.getName() == kTfSavedModelExportedNamesAttr) {
if (!isa<func::FuncOp, GlobalTensorOp>(op)) {
return op->emitError()
<< "'" << kTfSavedModelExportedNamesAttr
<< "' must be on a 'func' or 'tf_saved_model.global_tensor' op";
}
if (!IsStrArrayAttr(named_attr.getValue())) {
return op->emitError() << "'" << kTfSavedModelExportedNamesAttr
<< "' must be an array of strings";
}
if (!op->getParentOp()->getAttr("tf_saved_model.semantics")) {
return op->emitError() << "'" << kTfSavedModelExportedNamesAttr
<< "' must be on an op whose immediate parent has "
"attribute 'tf_saved_model.semantics'";
}
if (auto func = dyn_cast<func::FuncOp>(op)) {
if (failed(VerifyExportedFunc(func))) {
return failure();
}
}
return success();
}
if (named_attr.getName() == "tf_saved_model.semantics") {
auto module = dyn_cast<ModuleOp>(op);
if (!module) {
return op->emitError() << "'tf_saved_model.semantics' must "
"be on a module op";
}
return VerifySavedModelModule(module, this);
}
if (named_attr.getName() == "tf_saved_model.under_construction") {
return success();
}
if (named_attr.getName() == kTfSavedModelInitializerTypeAttr) {
return VerifyInitializerTypeAttr(op, named_attr);
}
return op->emitError() << "unknown tf_saved_model dialect attribute '"
<< named_attr.getName().getValue() << "'";
}
SmallVector<StringRef, 2> GetExportedNames(Operation *op) {
SmallVector<StringRef, 2> ret;
auto exported_names =
op->getAttrOfType<ArrayAttr>(kTfSavedModelExportedNamesAttr);
if (exported_names) {
for (auto name : exported_names) {
ret.push_back(mlir::cast<StringAttr>(name).getValue());
}
}
return ret;
}
bool IsExported(Operation *op) {
auto exported_names =
op->getAttrOfType<ArrayAttr>(kTfSavedModelExportedNamesAttr);
return exported_names && !exported_names.empty();
}
bool HasTfSavedModelSemantics(ModuleOp module) {
return module->getAttr("tf_saved_model.semantics") != nullptr;
}
Operation *LookupBoundInput(func::FuncOp func, int arg_index,
const SymbolTable &symbol_table) {
auto attr = func.getArgAttrOfType<FlatSymbolRefAttr>(
arg_index, "tf_saved_model.bound_input");
if (!attr) return nullptr;
return symbol_table.lookup(attr.getValue());
}
SessionInitializerOp GetSessionInitializerOp(mlir::ModuleOp op) {
auto initializers = op.getOps<SessionInitializerOp>();
if (initializers.empty()) return {};
return *initializers.begin();
}
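// Canonicalization: drops initializer functions whose body is effectively
// empty (a lone terminator, optionally preceded by a tf.NoOp) and erases the
// session_initializer op itself once no initializers remain.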
class OptimizeSessionInitializerPattern
: public OpRewritePattern<SessionInitializerOp> {
public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(SessionInitializerOp op,
PatternRewriter &rewriter) const override {
SymbolTable symbol_table(op->getParentOfType<ModuleOp>());
SmallVector<func::FuncOp, 2> to_remove;
SmallVector<mlir::Attribute, 2> to_keep;
for (auto sym_ref : op.getInitializers()) {
auto init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
mlir::cast<FlatSymbolRefAttr>(sym_ref).getValue());
auto &operations = init_func_op.front().getOperations();
if ((operations.size() == 1 &&
operations.front().hasTrait<OpTrait::IsTerminator>()) ||
(operations.size() == 2 &&
dyn_cast<mlir::TF::NoOp>(operations.front()) &&
operations.back().hasTrait<OpTrait::IsTerminator>())) {
to_remove.push_back(init_func_op);
} else {
to_keep.push_back(sym_ref);
}
}
for (auto func_op : to_remove) rewriter.eraseOp(func_op);
if (to_keep.empty())
rewriter.eraseOp(op);
else
op->setAttr("initializers", rewriter.getArrayAttr(to_keep));
return success();
}
};
void SessionInitializerOp::getCanonicalizationPatterns(
RewritePatternSet &results, MLIRContext *context) {
results.add<OptimizeSessionInitializerPattern>(context);
}
SmallVector<StringRef, 2> GetSessionInitializerExportedName(ModuleOp op) {
auto session_initializer_op = GetSessionInitializerOp(op);
if (!session_initializer_op) return {};
SymbolTable symbol_table(op);
SmallVector<StringRef, 2> results;
for (auto sym_ref : session_initializer_op.getInitializers()) {
auto init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
mlir::cast<FlatSymbolRefAttr>(sym_ref).getValue());
auto exported_names = GetExportedNames(init_func_op);
assert(exported_names.size() == 1);
results.push_back(exported_names[0]);
}
return results;
}
SmallVector<func::FuncOp, 2> GetInitializerFunctions(ModuleOp module_op) {
SessionInitializerOp session_initializer_op =
GetSessionInitializerOp(module_op);
if (!session_initializer_op) return {};
SymbolTable symbol_table(module_op);
SmallVector<func::FuncOp, 2> init_func_ops;
for (auto init_func_sym : session_initializer_op.getInitializers()
.getAsValueRange<FlatSymbolRefAttr>()) {
auto init_func_op = symbol_table.lookup<func::FuncOp>(init_func_sym);
init_func_ops.push_back(init_func_op);
}
return init_func_ops;
}
func::FuncOp GetInitializerFunction(ModuleOp module_op,
const StringRef initializer_type) {
SmallVector<func::FuncOp, 2> init_func_ops =
GetInitializerFunctions(module_op);
auto init_func_itr = absl::c_find_if(
init_func_ops, [initializer_type](const func::FuncOp init_func_op) {
const auto init_type_attr = init_func_op->getAttrOfType<StringAttr>(
kTfSavedModelInitializerTypeAttr);
return init_type_attr && init_type_attr == initializer_type;
});
return init_func_itr == init_func_ops.end() ? nullptr : *init_func_itr;
}
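// Returns true if the module contains at least one tf.RestoreV2 op, i.e. it
// is a variable-restoring graph.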
bool IsRestoreGraph(ModuleOp module) {
return module
.walk([](mlir::Operation *op) {
if (llvm::isa<mlir::TF::RestoreV2Op>(op)) {
return mlir::WalkResult::interrupt();
}
return mlir::WalkResult::advance();
})
.wasInterrupted();
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tf_saved_model {
namespace {
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::SizeIs;
class TfSavedModelTest : public ::testing::Test {
protected:
TfSavedModelTest() : ctx_() {
ctx_.loadDialect<TensorFlowSavedModelDialect, func::FuncDialect>();
}
MLIRContext ctx_;
};
ModuleOp ParseModuleOp(const StringRef module_op_str, Block& block,
MLIRContext& ctx) {
const LogicalResult parse_result =
parseSourceString(module_op_str, &block, ParserConfig(&ctx));
EXPECT_TRUE(succeeded(parse_result));
return cast<ModuleOp>(block.front());
}
TEST_F(TfSavedModelTest,
GetInitializerFunctionReturnsNullWhenNoSessionInitializerOp) {
constexpr StringRef kModuleOpStr =
R"mlir(module attributes {tf_saved_model.semantics} {})mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
func::FuncOp init_func_op = GetInitializerFunction(
module_op, kTfSavedModelInitializerInitType);
EXPECT_THAT(init_func_op, IsNull());
}
TEST_F(TfSavedModelTest,
GetInitializerFunctionReturnsNullWhenInitializersEmpty) {
constexpr StringRef kModuleOpStr = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = []} : () -> ()
}
)mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
func::FuncOp init_func_op = GetInitializerFunction(
module_op, kTfSavedModelInitializerInitType);
EXPECT_THAT(init_func_op, IsNull());
}
TEST_F(TfSavedModelTest,
GetInitializerFunctionReturnsFuncOpMatchingInitializerType) {
constexpr StringRef kModuleOpStr = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func]} : () -> ()
func.func @init_func() attributes {tf_saved_model.exported_names = ["init_func"], tf_saved_model.initializer_type = "init_op"} {
func.return
}
}
)mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
func::FuncOp init_func_op = GetInitializerFunction(
module_op, kTfSavedModelInitializerInitType);
EXPECT_THAT(init_func_op, NotNull());
EXPECT_THAT(init_func_op.getSymName(), "init_func");
EXPECT_THAT(
init_func_op->getAttrOfType<StringAttr>(kTfSavedModelInitializerTypeAttr),
kTfSavedModelInitializerInitType);
}
TEST_F(TfSavedModelTest, GetInitializerFunctionNoMatchingInitializerType) {
constexpr StringRef kModuleOpStr = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func]} : () -> ()
func.func @init_func() attributes {tf_saved_model.exported_names = ["init_func"], tf_saved_model.initializer_type = "restore_op"} {
func.return
}
}
)mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
func::FuncOp init_func_op = GetInitializerFunction(
module_op, kTfSavedModelInitializerInitType);
EXPECT_THAT(init_func_op, IsNull());
}
TEST_F(TfSavedModelTest, GetInitializerFunctionsEmptyWhenNoInitFunctions) {
constexpr StringRef kModuleOpStr = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = []} : () -> ()
}
)mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
SmallVector<func::FuncOp, 2> init_func_ops =
GetInitializerFunctions(module_op);
EXPECT_THAT(init_func_ops, IsEmpty());
}
TEST_F(TfSavedModelTest,
GetInitializerFunctionsEmptyWhenNoSessionInitializerOp) {
constexpr StringRef kModuleOpStr =
R"mlir(module attributes {tf_saved_model.semantics} {})mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
SmallVector<func::FuncOp, 2> init_func_ops =
GetInitializerFunctions(module_op);
EXPECT_THAT(init_func_ops, IsEmpty());
}
TEST_F(TfSavedModelTest, GetInitializerFunctionsReturnsMultipleFuncOps) {
constexpr StringRef kModuleOpStr = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func1, @init_func2]} : () -> ()
func.func @init_func1() attributes {tf_saved_model.exported_names = ["init_func1"], tf_saved_model.initializer_type = "init_op"} {
func.return
}
func.func @init_func2() attributes {tf_saved_model.exported_names = ["init_func2"], tf_saved_model.initializer_type = "restore_op"} {
func.return
}
}
)mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
SmallVector<func::FuncOp, 2> init_func_ops =
GetInitializerFunctions(module_op);
EXPECT_THAT(init_func_ops, SizeIs(2));
EXPECT_THAT(init_func_ops[0].getSymName(), Eq("init_func1"));
EXPECT_THAT(init_func_ops[1].getSymName(), Eq("init_func2"));
}
}
}
} |
1,240 | cpp | tensorflow/tensorflow | lower_cluster_to_runtime_ops | tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.cc | tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_HOST_RUNTIME_LOWER_CLUSTER_TO_RUNTIME_OPS_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_HOST_RUNTIME_LOWER_CLUSTER_TO_RUNTIME_OPS_H_
#include "absl/base/attributes.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "xla/tsl/framework/device_type.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace tfrt_compiler {
tensorflow::Status RunLowerClusterToRuntimeOpsPassPipeline(
mlir::ModuleOp module, tsl::DeviceType xla_device_type,
llvm::StringRef module_name = llvm::StringRef());
void RegisterTPULowerClusterToRuntimeOpsPassPipeline();
void RegisterNonTPULowerClusterToRuntimeOpsPassPipeline();
}
}
#endif
#include <memory>
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/runtime_passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/sparsecore_passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "xla/tsl/framework/device_type.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/error_logging.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace tfrt_compiler {
namespace {
using mlir::LogicalResult;
using mlir::OpPassManager;
using mlir::PassManager;
using mlir::func::FuncOp;
using mlir::TF::StandardPipelineOptions;
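// Dumps the IR around each pass in `pm` under `dump_group_name`, using the
// debug data dumper's file-naming scheme. Multithreading is disabled because
// per-pass IR printing requires it; pass timing is enabled as a side effect.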
void EnablePassIRPrinting(PassManager& pm, const std::string& dump_group_name,
llvm::StringRef module_name) {
pm.getContext()->disableMultithreading();
pm.enableIRPrinting(std::make_unique<::tensorflow::DataDumperLoggerConfig>(
[module_name, dump_group_name](const std::string& pass_tag_name,
mlir::Operation* op) {
return DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name.str(), dump_group_name, pass_tag_name);
},
"",
true));
pm.enableTiming();
}
}
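// Assembles the TPU lowering pipeline: rewrites tf_device.cluster_func into
// TPU runtime ops (e.g. TPUCompile/TPUExecute, per the pipeline registration
// text below), then hoists replicate-invariant ops, merges variables with
// execute, extracts dynamic-shape copies, and optionally reformats runtime
// variables.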
void AddTPULowerClusterToRuntimeOpsPassPipeline(OpPassManager& pm,
llvm::StringRef module_name) {
pm.addPass(mlir::TFTPU::CreateTPURewritePass(module_name));
pm.addPass(mlir::createSymbolDCEPass());
pm.addNestedPass<FuncOp>(
mlir::TFDevice::CreateReplicateInvariantOpHoistingPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateEmbeddingProgramKeyPass());
pm.addPass(mlir::TFTPU::CreateTPUMergeVariablesWithExecutePass());
pm.addNestedPass<FuncOp>(
mlir::TFTPU::CreateExtractTPUCopyWithDynamicShapeOpPass());
pm.addNestedPass<FuncOp>(
mlir::TFTPU::CreateTPUColocateCompositeResourceOps());
if (tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_tpu_variable_runtime_reformatting_pass) {
pm.addPass(mlir::TFTPU::CreateTPUVariableRuntimeReformattingPass());
}
}
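// Assembles the CPU/GPU lowering pipeline: rewrites clusters into XlaLaunch
// ops, then cleans up with canonicalization, CSE, and symbol DCE.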
void AddNonTPULowerClusterToRuntimeOpsPassPipeline(
OpPassManager& pm, llvm::StringRef module_name) {
pm.addPass(mlir::TFDevice::CreateXlaRewritePass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addNestedPass<FuncOp>(mlir::createCSEPass());
pm.addPass(mlir::createSymbolDCEPass());
}
void CreateTPULowerClusterToRuntimeOpsPassPipeline(
OpPassManager& pm, const StandardPipelineOptions& options) {
AddTPULowerClusterToRuntimeOpsPassPipeline(pm, "");
}
void CreateNonTPULowerClusterToRuntimeOpsPassPipeline(
OpPassManager& pm, const StandardPipelineOptions& options) {
AddNonTPULowerClusterToRuntimeOpsPassPipeline(pm, "");
}
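// Passes `status` through unchanged. On error, additionally logs it and bumps
// the phase-one bridge failure counter for the given bridge/device type.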
tensorflow::Status RecordIfErrorStatus(const std::string error_prefix,
std::string bridge_type,
tsl::DeviceType device_type,
absl::Status status) {
if (status.ok()) {
return status;
}
VLOG(2) << error_prefix << " " << status;
tensorflow::metrics::UpdateTfMlirBridgeFirstPhaseCounter(
bridge_type,
mlir::TF::kMlirPh1BridgeCounterV2,
device_type.type_string(),
false,
"failure");
std::string bridge_subcomponent = "TFXLA_PHASE_ONE_MLIR_TPU_BRIDGE";
tsl::OkOrSetErrorCounterPayload(
tensorflow::core::platform::ErrorSourceProto::MLIR_BRIDGE_PHASE_1,
status);
if (device_type != DeviceType(DEVICE_TPU_XLA_JIT)) {
bridge_subcomponent = "TFXLA_PHASE_ONE_MLIR_CPU/GPU_BRIDGE";
}
tsl::error_logging::Log(mlir::TF::kBridgeComponent, bridge_subcomponent,
status.ToString())
.IgnoreError();
return status;
}
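// Runs the TPU or CPU/GPU lowering pipeline on `module` depending on
// `xla_device_type`, dumping IR before and after the run when enabled. Pass
// failures are surfaced through the scoped diagnostic handler rather than the
// raw LogicalResult, which is why `result` below is intentionally discarded.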
absl::Status RunLowerClusterToRuntimeOpsPassPipeline(
mlir::ModuleOp module, tsl::DeviceType xla_device_type,
llvm::StringRef module_name) {
PassManager runtime_lowering(module.getContext());
::tensorflow::applyTensorflowAndCLOptions(runtime_lowering);
if (xla_device_type == DeviceType(DEVICE_TPU_XLA_JIT)) {
AddTPULowerClusterToRuntimeOpsPassPipeline(runtime_lowering, module_name);
} else {
AddNonTPULowerClusterToRuntimeOpsPassPipeline(runtime_lowering,
module_name);
}
mlir::StatusScopedDiagnosticHandler diag_handler(
module.getContext(), false,
!VLOG_IS_ON(1));
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(module_name.str(), kDebugGroupMain,
"runtime_lowering_before"),
module, llvm::StringRef(), &runtime_lowering);
}
if (VLOG_IS_ON(2) || DEBUG_DATA_DUMPER()->ShouldDump(
module_name.str(), kDebugGroupRuntimeLowering)) {
EnablePassIRPrinting(runtime_lowering, kDebugGroupRuntimeLowering,
module_name);
}
LogicalResult result = runtime_lowering.run(module);
(void)result;
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(module_name.str(), kDebugGroupMain,
"runtime_lowering_after"),
module, llvm::StringRef(), &runtime_lowering);
}
std::string bridge_type = xla_device_type == DeviceType(DEVICE_TPU_XLA_JIT)
? mlir::TF::kMlirPh1BridgeCounterReplicated
: mlir::TF::kMlirPh1BridgeCounterNonReplicated;
auto result_status = diag_handler.ConsumeStatus();
TF_RETURN_IF_ERROR(
RecordIfErrorStatus("lower_cluster_to_runtime",
bridge_type, xla_device_type, result_status));
return absl::OkStatus();
}
void RegisterTPULowerClusterToRuntimeOpsPassPipeline() {
static mlir::PassPipelineRegistration<StandardPipelineOptions> pipeline(
"tfrt-lower-cluster-to-runtime-ops-tpu",
"Run all the passes involved after the clustering transformations from "
"the TF2XLA Bridge. Takes as input a Module with tf_device.cluster ops "
"and outputs TFRT runtime ops such as TPUCompile. This pipeline is for "
"TPU.",
CreateTPULowerClusterToRuntimeOpsPassPipeline);
}
void RegisterNonTPULowerClusterToRuntimeOpsPassPipeline() {
static mlir::PassPipelineRegistration<StandardPipelineOptions> pipeline(
"tfrt-lower-cluster-to-runtime-ops-non-tpu",
"Run all the passes involved after the clustering transformations from "
"the TF2XLA Bridge. Takes as input a Module with tf_device.cluster ops "
"and outputs TFRT runtime ops such as XlaLaunch. This is for CPU/GPU",
CreateNonTPULowerClusterToRuntimeOpsPassPipeline);
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/tsl/framework/device_type.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace tfrt_compiler {
namespace {
using mlir::DialectRegistry;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
using mlir::func::FuncOp;
using ::tensorflow::monitoring::testing::CellReader;
using tsl::DeviceType;
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/testdata/");
}
static constexpr char kCompilationStreamz[] =
"/tensorflow/core/tf_mlir_bridge_first_phase_v2_count";
class LowerClusterToRuntimeOpsTest : public ::testing::Test {
public:
LowerClusterToRuntimeOpsTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
env_ = Env::Default();
test_group_name_ = "TestGroup";
test_dir_ = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", test_dir_.c_str(), 1);
}
absl::Status CreateMlirModule(std::string mlir_module_filename) {
std::string mlir_module_path = TestDataPath() + mlir_module_filename;
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
Env* env_;
std::string test_dir_;
std::string test_group_name_;
};
TEST_F(LowerClusterToRuntimeOpsTest, SanityCheck) {
TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_TPU_XLA_JIT)));
}
TEST_F(LowerClusterToRuntimeOpsTest, LowersClusterOpsTPU) {
TF_ASSERT_OK(CreateMlirModule("basic_cluster.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_TPU_XLA_JIT)));
FuncOp main = mlir_module_->lookupSymbol<FuncOp>("main");
ASSERT_TRUE(main);
bool has_cluster_op = false;
main.walk([&](mlir::tf_device::ClusterOp) {
has_cluster_op = true;
return mlir::WalkResult::interrupt();
});
EXPECT_FALSE(has_cluster_op);
}
TEST_F(LowerClusterToRuntimeOpsTest, LowersClusterOpsCPU) {
TF_ASSERT_OK(CreateMlirModule("basic_cluster.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_CPU_XLA_JIT)));
FuncOp main = mlir_module_->lookupSymbol<FuncOp>("main");
ASSERT_TRUE(main);
bool has_cluster_op = false;
main.walk([&](mlir::tf_device::ClusterOp) {
has_cluster_op = true;
return mlir::WalkResult::interrupt();
});
EXPECT_FALSE(has_cluster_op);
}
TEST_F(LowerClusterToRuntimeOpsTest, LowersClusterOpsGPU) {
TF_ASSERT_OK(CreateMlirModule("basic_cluster.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_GPU_XLA_JIT)));
FuncOp main = mlir_module_->lookupSymbol<FuncOp>("main");
ASSERT_TRUE(main);
bool has_cluster_op = false;
main.walk([&](mlir::tf_device::ClusterOp) {
has_cluster_op = true;
return mlir::WalkResult::interrupt();
});
EXPECT_FALSE(has_cluster_op);
}
TEST_F(LowerClusterToRuntimeOpsTest, ErrorsWithBadCluster) {
CellReader<int64_t> compilation_status(kCompilationStreamz);
TF_ASSERT_OK(CreateMlirModule("malformed_cluster.mlir"));
EXPECT_FALSE(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_TPU_XLA_JIT))
.ok());
EXPECT_EQ(
compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterReplicated,
mlir::TF::kMlirPh1BridgeCounterV2, "XLA_TPU_JIT",
"fallback_disabled", "failure"),
1);
}
TEST_F(LowerClusterToRuntimeOpsTest, DumpsPipelinePasses) {
std::vector<std::string> files;
TF_ASSERT_OK(env_->GetChildren(test_dir_, &files));
EXPECT_THAT(files, ::testing::IsEmpty());
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
setenv("TF_DUMP_GRAPH_GROUPS", "main,runtime_lowering", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
TF_ASSERT_OK(CreateMlirModule("basic_cluster.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_TPU_XLA_JIT)));
TF_ASSERT_OK(env_->GetChildren(test_dir_, &files));
EXPECT_THAT(files, ::testing::SizeIs(15));
}
}
}
} |
1,241 | cpp | tensorflow/tensorflow | tpu_metadata_utils | tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.cc | tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_HOST_RUNTIME_TPU_METADATA_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_HOST_RUNTIME_TPU_METADATA_UTILS_H_
#include <optional>
#include "mlir/IR/Diagnostics.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
namespace mlir {
namespace TFTPU {
LogicalResult SetMetadataProtoFromClusterFuncOp(
tf_device::ClusterFuncOp op, int num_replicas, int num_cores_per_replica,
std::optional<xla::DeviceAssignmentProto>&& xla_device_assignment,
tensorflow::tpu::TPUCompileMetadataProto* metadata);
}
}
#endif
#include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.h"
#include <optional>
#include <string>
#include <utility>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
namespace mlir {
namespace TFTPU {
namespace {
constexpr char kStepMarkerLocationAttr[] = "step_marker_location";
constexpr char kUseXlaSpmdAttr[] = "use_spmd_for_xla_partitioning";
constexpr char kBadStringArrayElementMsg[] =
"bad '{0}' attribute at index {1}, not a string";
constexpr char kBadArrayElementMsg[] =
"bad '{0}' attribute at index {1} with value '{2}': failed to parse to {3}";
constexpr char kBadArrayAttrLengthMsg[] =
"bad '{0}' attribute, expected array attribute of size {1}, got size {2}";
std::string CreateMissingAttributeMsg(llvm::StringRef attribute) {
return llvm::formatv("requires attribute '{0}'", attribute).str();
}
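// Parses the required `step_marker_location` attribute into the metadata
// proto. An empty string defaults to STEP_MARK_AT_ENTRY; any other value must
// parse as a xla::DebugOptions::StepMarkerLocation enum name.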
LogicalResult SetMetadataProtoStepMarkerLocation(
tf_device::ClusterFuncOp op,
tensorflow::tpu::TPUCompileMetadataProto* metadata) {
auto step_marker_location =
op->getAttrOfType<StringAttr>(kStepMarkerLocationAttr);
if (!step_marker_location)
return op.emitOpError(CreateMissingAttributeMsg(kStepMarkerLocationAttr));
xla::DebugOptions::StepMarkerLocation location =
xla::DebugOptions::STEP_MARK_AT_ENTRY;
if (!step_marker_location.getValue().empty() &&
!xla::DebugOptions::StepMarkerLocation_Parse(
std::string(step_marker_location.getValue()), &location))
return op.emitOpError(llvm::formatv("bad '{0}' attribute with value '{1}'",
kStepMarkerLocationAttr,
step_marker_location.getValue()));
metadata->set_step_marker_location(location);
return success();
}
LogicalResult SetOpSharding(Operation* op, Attribute attr, llvm::StringRef name,
int index, xla::OpSharding* sharding_ptr) {
auto sharding_attr = mlir::dyn_cast<StringAttr>(attr);
if (!sharding_attr)
return op->emitOpError(
llvm::formatv(kBadStringArrayElementMsg, name, index));
if (tensorflow::DecodeShardingAttribute(sharding_attr, *sharding_ptr)
.failed()) {
return op->emitOpError(llvm::formatv(kBadArrayElementMsg, name, index,
sharding_attr.getValue(),
"xla::OpSharding"));
}
return success();
}
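// Populates one metadata Arg per cluster operand: dtype (VARIABLE kind for
// DT_RESOURCE, PARAMETER otherwise), shape (unknown rank for unranked types),
// the per-operand input sharding, the cross-replica replication bit, and
// whether the argument index appears in the bounded dynamic-arg index list.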
LogicalResult SetMetadataProtoArgs(
tf_device::ClusterFuncOp op,
tensorflow::tpu::TPUCompileMetadataProto* metadata) {
auto input_shardings =
op->getAttrOfType<ArrayAttr>(tensorflow::kInputShardingAttr);
if (!input_shardings)
return op.emitOpError(
CreateMissingAttributeMsg(tensorflow::kInputShardingAttr));
if (input_shardings.size() != op.getNumOperands())
return op.emitOpError(
llvm::formatv(kBadArrayAttrLengthMsg, tensorflow::kInputShardingAttr,
op.getNumOperands(), input_shardings.size()));
mlir::StringAttr replication_attr_name = mlir::StringAttr::get(
op.getContext(), "mhlo.is_same_data_across_replicas");
auto dynamic_arg_idx = op->getAttrOfType<ArrayAttr>(TF::kDynamicArgIndexAttr);
llvm::SmallSet<int, 4> dynamic_arg_idx_set;
if (dynamic_arg_idx) {
for (auto idx : dynamic_arg_idx.getValue()) {
dynamic_arg_idx_set.insert(mlir::dyn_cast<IntegerAttr>(idx).getInt());
}
}
for (auto operand_type_and_idx : llvm::enumerate(op.getOperandTypes())) {
Type operand_type = operand_type_and_idx.value();
int index = operand_type_and_idx.index();
tensorflow::tpu::TPUCompileMetadataProto::Arg* arg = metadata->add_args();
tensorflow::DataType dtype;
tensorflow::Status status =
tensorflow::ConvertToDataType(operand_type, &dtype);
if (!status.ok())
return op.emitOpError(
llvm::formatv("failed to determine operand type at index {0}: {1}",
index, status.message()));
arg->set_dtype(dtype);
if (dtype == tensorflow::DT_RESOURCE)
arg->set_kind(tensorflow::tpu::TPUCompileMetadataProto::Arg::VARIABLE);
else
arg->set_kind(tensorflow::tpu::TPUCompileMetadataProto::Arg::PARAMETER);
*arg->mutable_shape() = tensorflow::TensorShapeProto();
if (auto ranked_tensor_type =
mlir::dyn_cast<RankedTensorType>(operand_type)) {
tensorflow::TensorShapeProto shape_proto;
ConvertToTensorShapeProto(ranked_tensor_type.getShape(), &shape_proto);
*arg->mutable_shape() = std::move(shape_proto);
} else {
arg->mutable_shape()->set_unknown_rank(true);
}
if (failed(SetOpSharding(op, input_shardings.getValue()[index],
tensorflow::kInputShardingAttr, index,
arg->mutable_sharding())))
return failure();
auto attr = op.getFuncOp().getArgAttrOfType<mlir::BoolAttr>(
index, replication_attr_name);
arg->set_is_same_data_across_replicas(attr != nullptr && attr.getValue());
arg->mutable_is_bounded_dynamic_dim()->Add(
dynamic_arg_idx_set.contains(index));
}
return success();
}
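// Populates one metadata retval sharding per cluster result, validating that
// the output sharding attribute covers every result.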
LogicalResult SetMetadataProtoRetvals(
tf_device::ClusterFuncOp op,
tensorflow::tpu::TPUCompileMetadataProto* metadata) {
auto output_shardings =
op->getAttrOfType<ArrayAttr>(tensorflow::kOutputShardingAttr);
if (!output_shardings)
return op.emitOpError(
CreateMissingAttributeMsg(tensorflow::kOutputShardingAttr));
if (output_shardings.size() != op.getNumResults())
return op.emitOpError(
llvm::formatv(kBadArrayAttrLengthMsg, tensorflow::kOutputShardingAttr,
op.getNumResults(), output_shardings.size()));
for (auto output_sharding_and_idx : llvm::enumerate(output_shardings))
if (failed(SetOpSharding(op, output_sharding_and_idx.value(),
tensorflow::kOutputShardingAttr,
output_sharding_and_idx.index(),
metadata->add_retvals()->mutable_sharding())))
return failure();
return success();
}
}
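// Top-level entry point: fills in compile options (when attached as a
// serialized proto attribute), replica/core counts, step marker location,
// optional device assignment, the SPMD partitioning flag, and the arg/retval
// metadata derived above.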
LogicalResult SetMetadataProtoFromClusterFuncOp(
tf_device::ClusterFuncOp op, int num_replicas, int num_cores_per_replica,
std::optional<xla::DeviceAssignmentProto>&& xla_device_assignment,
tensorflow::tpu::TPUCompileMetadataProto* metadata) {
if (auto options_attr =
op->getAttrOfType<StringAttr>("tpu_compile_options_proto")) {
if (!metadata->mutable_compile_options()->ParseFromArray(
options_attr.data(), options_attr.size())) {
return failure();
}
}
metadata->set_num_replicas(num_replicas);
metadata->set_num_cores_per_replica(num_cores_per_replica);
if (failed(SetMetadataProtoStepMarkerLocation(op, metadata)))
return failure();
if (xla_device_assignment.has_value())
*metadata->mutable_device_assignment() =
std::move(xla_device_assignment.value());
auto use_spmd_attr = op->getAttrOfType<BoolAttr>(kUseXlaSpmdAttr);
if (!use_spmd_attr)
return op.emitOpError(CreateMissingAttributeMsg(kUseXlaSpmdAttr));
metadata->set_use_spmd_for_xla_partitioning(use_spmd_attr.getValue());
if (failed(SetMetadataProtoArgs(op, metadata))) return failure();
return SetMetadataProtoRetvals(op, metadata);
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.h"
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace TFTPU {
namespace {
using mlir::DialectRegistry;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
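// gMock matcher that compares protobuf messages by their serialized byte
// strings, which is sufficient for the deterministic protos used here.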
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.SerializeAsString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p, testing::MatchResultListener*) const {
return p.SerializeAsString() == expected_;
}
void DescribeTo(::std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(::std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/testdata/");
}
class TpuMetadataUtilsTest : public ::testing::Test {
public:
TpuMetadataUtilsTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
}
absl::StatusOr<std::vector<mlir::tf_device::ClusterFuncOp>> GetClusterFuncOps(
absl::string_view mlir_module_filename) {
TF_RETURN_IF_ERROR(CreateMlirModule(mlir_module_filename));
std::vector<mlir::tf_device::ClusterFuncOp> cluster_func_ops;
mlir_module_->walk([&](mlir::tf_device::ClusterFuncOp op) {
cluster_func_ops.push_back(op);
});
return cluster_func_ops;
}
private:
absl::Status CreateMlirModule(absl::string_view mlir_module_filename) {
std::string mlir_module_path =
absl::StrCat(TestDataPath(), mlir_module_filename);
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
};
TEST_F(TpuMetadataUtilsTest, SingleDevice) {
TF_ASSERT_OK_AND_ASSIGN(auto cluster_func_ops,
GetClusterFuncOps("basic_cluster.mlir"));
mlir::tf_device::ClusterFuncOp cluster_func_op = cluster_func_ops.front();
tensorflow::tpu::TPUCompileMetadataProto compile_metadata;
ASSERT_TRUE(mlir::succeeded(SetMetadataProtoFromClusterFuncOp(
cluster_func_op,
1, 1, {}, &compile_metadata)));
tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
num_replicas: 1 num_cores_per_replica: 1
)pb",
&expected_compile_metadata));
EXPECT_THAT(compile_metadata, EqualsProto(expected_compile_metadata));
}
TEST_F(TpuMetadataUtilsTest, spmd) {
TF_ASSERT_OK_AND_ASSIGN(auto cluster_func_ops,
GetClusterFuncOps("spmd.mlir"));
mlir::tf_device::ClusterFuncOp cluster_func_op = cluster_func_ops.front();
tensorflow::tpu::TPUCompileMetadataProto compile_metadata;
ASSERT_TRUE(mlir::succeeded(SetMetadataProtoFromClusterFuncOp(
cluster_func_op,
1, 2, {}, &compile_metadata)));
tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
args {
dtype: DT_FLOAT
shape { unknown_rank: true }
kind: PARAMETER
sharding {
type: OTHER
tile_assignment_dimensions: 2
tile_assignment_dimensions: 1
tile_assignment_devices: 0
tile_assignment_devices: 1
}
is_bounded_dynamic_dim: false
}
retvals { sharding {} }
num_replicas: 1
num_cores_per_replica: 2
use_spmd_for_xla_partitioning: true
compile_options {}
)pb",
&expected_compile_metadata));
EXPECT_THAT(compile_metadata, EqualsProto(expected_compile_metadata));
}
}
}
} |
1,242 | cpp | tensorflow/tensorflow | codegen | tensorflow/compiler/aot/codegen.cc | tensorflow/compiler/aot/codegen_test.cc | #ifndef TENSORFLOW_COMPILER_AOT_CODEGEN_H_
#define TENSORFLOW_COMPILER_AOT_CODEGEN_H_
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/aot/compile.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
namespace tensorflow {
namespace tfcompile {
struct CodegenOpts {
string class_name;
string target_triple;
std::vector<string> namespaces;
bool gen_name_to_index = false;
bool gen_program_shape = false;
bool gen_hlo_profile_printer_data = false;
bool use_xla_runtime = false;
};
struct MetadataResult {
std::vector<string> header_variable_decls;
string program_shape_access_shim;
string hlo_profile_printer_data_access_shim;
string object_file_data;
};
Status GenerateMetadata(const CodegenOpts& opts,
const CompileResult& compile_result,
MetadataResult* metadata_result);
Status GenerateHeader(const CodegenOpts& opts, const tf2xla::Config& config,
const CompileResult& compile_result,
const MetadataResult& metadata_result, string* header);
Status ParseCppClass(const string& cpp_class, string* class_name,
std::vector<string>* namespaces);
Status ValidateCppIdent(absl::string_view ident, absl::string_view msg);
}
}
#endif
#include "tensorflow/compiler/aot/codegen.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "absl/strings/substitute.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/aot/embedded_protocol_buffers.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/cpu_function_runtime.h"
#include "xla/service/compiler.h"
#include "xla/service/cpu/buffer_info_util.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace tfcompile {
namespace {
using BufferInfo = xla::cpu_function_runtime::BufferInfo;
bool IsAlpha(char c) {
return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
}
bool IsAlphaNum(char c) { return IsAlpha(c) || (c >= '0' && c <= '9'); }
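// Converts an XLA primitive type to the C++ type name emitted in the
// generated header; unsupported types yield an Unimplemented error.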
Status XLATypeToCpp(xla::PrimitiveType type, string* str) {
switch (type) {
case xla::PRED:
*str = "bool";
break;
case xla::S8:
*str = "tensorflow::int8";
break;
case xla::S16:
*str = "tensorflow::int16";
break;
case xla::S32:
*str = "tensorflow::int32";
break;
case xla::S64:
*str = "int64_t";
break;
case xla::U8:
*str = "tensorflow::uint8";
break;
case xla::U16:
*str = "tensorflow::uint16";
break;
case xla::U32:
*str = "tensorflow::uint32";
break;
case xla::U64:
*str = "tensorflow::uint64";
break;
case xla::F32:
*str = "float";
break;
case xla::F64:
*str = "double";
break;
default:
return errors::Unimplemented("XLA type ", xla::PrimitiveType_Name(type),
" has no equivalent in C++");
}
return absl::OkStatus();
}
size_t TotalBufferBytes(const std::vector<BufferInfo>& buffer_infos) {
return std::accumulate(buffer_infos.begin(), buffer_infos.end(), size_t{0},
[](size_t size, const BufferInfo& buffer_info) {
return size + buffer_info.size();
});
}
std::vector<BufferInfo> ExtractEntryParamBufferInfos(
const std::vector<BufferInfo>& buffer_infos) {
std::vector<BufferInfo> result;
std::copy_if(buffer_infos.begin(), buffer_infos.end(),
std::back_inserter(result), [](const BufferInfo& buffer_info) {
return buffer_info.is_entry_parameter();
});
return result;
}
std::vector<BufferInfo> ExtractTempBufferInfos(
const std::vector<BufferInfo>& buffer_infos) {
std::vector<BufferInfo> result;
std::copy_if(buffer_infos.begin(), buffer_infos.end(),
std::back_inserter(result), [](const BufferInfo& buffer_info) {
return buffer_info.is_temp_buffer();
});
return result;
}
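// Builds the template substitutions for argument/result `i` of the given
// shape: {{I}}, {{TYPE}}, {{DIM_VARS}}, {{DIM_SIZES}}, {{INDICES}} and
// {{COUNT}}. Scalars and single-element 1-D shapes collapse to [1]/[0].
// For example, an F32 shape [2, 3] yields {{TYPE}}=float,
// {{DIM_SIZES}}=[2][3], {{INDICES}}=[dim0][dim1], and {{COUNT}}=6.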
Status AddRewritesForShape(int i, const xla::Shape& shape,
std::vector<std::pair<string, string>>* rewrites) {
string type;
TF_RETURN_IF_ERROR(XLATypeToCpp(shape.element_type(), &type));
std::vector<string> dim_vars;
string dim_sizes, indices;
int count = 1;
if (shape.rank() == 0 ||
(shape.dimensions_size() == 1 && shape.dimensions(0) == 1)) {
dim_sizes = "[1]";
indices = "[0]";
} else {
for (int dim = 0; dim < shape.dimensions_size(); ++dim) {
dim_vars.push_back(absl::StrCat("size_t dim", dim));
dim_sizes += absl::StrCat("[", shape.dimensions(dim), "]");
indices += absl::StrCat("[dim", dim, "]");
count *= shape.dimensions(dim);
}
}
rewrites->push_back({"{{I}}", absl::StrCat(i)});
rewrites->push_back({"{{TYPE}}", type});
rewrites->push_back({"{{DIM_VARS}}", absl::StrJoin(dim_vars, ", ")});
rewrites->push_back({"{{DIM_SIZES}}", dim_sizes});
rewrites->push_back({"{{INDICES}}", indices});
rewrites->push_back({"{{COUNT}}", absl::StrCat(count)});
return absl::OkStatus();
}
string RewriteWithName(const string& name, string code,
const std::vector<std::pair<string, string>>& rewrites) {
absl::StrReplaceAll(rewrites, &code);
absl::StrReplaceAll({{"{{NAME}}", name}}, &code);
return code;
}
Status GenArgMethods(const tf2xla::Config& config,
const xla::ProgramShapeProto& ps,
const CompileResult& compile_result, string* methods) {
const int num_args = ps.parameters_size();
if (config.feed_size() + config.variable_size() < num_args) {
return errors::InvalidArgument(
"mismatch between feed_size(", config.feed_size(), ")+variable_size(",
config.variable_size(), ") and num_args(", num_args, ")");
}
for (int i = 0; i < config.feed_size(); ++i) {
std::vector<std::pair<string, string>> rewrites;
TF_RETURN_IF_ERROR(
AddRewritesForShape(i, xla::Shape(ps.parameters(i)), &rewrites));
const string code = R"(
void set_arg{{NAME}}_data(const void* data) {
set_arg_data({{I}}, data);
}
{{TYPE}}* arg{{NAME}}_data() {
return static_cast<{{TYPE}}*>(arg_data({{I}}));
}
{{TYPE}}& arg{{NAME}}({{DIM_VARS}}) {
return (*static_cast<{{TYPE}}(*){{DIM_SIZES}}>(
arg_data({{I}}))){{INDICES}};
}
const {{TYPE}}* arg{{NAME}}_data() const {
return static_cast<const {{TYPE}}*>(arg_data({{I}}));
}
const {{TYPE}}& arg{{NAME}}({{DIM_VARS}}) const {
return (*static_cast<const {{TYPE}}(*){{DIM_SIZES}}>(
arg_data({{I}}))){{INDICES}};
}
int arg{{NAME}}_size() const {
return {{COUNT}} * sizeof({{TYPE}});
}
int arg{{NAME}}_count() const {
return {{COUNT}};
}
)";
*methods += RewriteWithName(absl::StrCat(i), code, rewrites);
if (!config.feed(i).name().empty()) {
*methods += RewriteWithName("_" + config.feed(i).name(), code, rewrites);
}
}
return absl::OkStatus();
}
Status GenResultMethods(const tf2xla::Config& config,
const xla::ProgramShapeProto& ps, string* methods) {
if (ps.result().element_type() != xla::TUPLE) {
return errors::Internal("codegen requires the XLA result to be a tuple");
}
size_t num_results = ps.result().tuple_shapes_size();
int readonly_variables = absl::c_count_if(
config.variable(),
[](const tf2xla::Variable& var) { return var.readonly(); });
const int actual_num_results =
config.fetch_size() + config.variable_size() - readonly_variables;
if (actual_num_results != num_results) {
return errors::InvalidArgument("mismatch between fetch_size(",
config.fetch_size(), ")+variable_size(",
config.variable_size(), ") and tuple_size(",
ps.result().tuple_shapes_size(), ")");
}
for (int i = 0; i < config.fetch_size(); ++i) {
std::vector<std::pair<string, string>> rewrites;
TF_RETURN_IF_ERROR(AddRewritesForShape(
i, xla::Shape(ps.result().tuple_shapes(i)), &rewrites));
string code = R"(
{{TYPE}}* result{{NAME}}_data() {
return static_cast<{{TYPE}}*>(result_data({{I}}));
}
{{TYPE}}& result{{NAME}}({{DIM_VARS}}) {
return (*static_cast<{{TYPE}}(*){{DIM_SIZES}}>(
result_data({{I}}))){{INDICES}};
}
const {{TYPE}}* result{{NAME}}_data() const {
return static_cast<const {{TYPE}}*>(result_data({{I}}));
}
const {{TYPE}}& result{{NAME}}({{DIM_VARS}}) const {
return (*static_cast<const {{TYPE}}(*){{DIM_SIZES}}>(
result_data({{I}}))){{INDICES}};
}
int result{{NAME}}_size() const {
return {{COUNT}} * sizeof({{TYPE}});
}
int result{{NAME}}_count() const {
return {{COUNT}};
}
)";
*methods += RewriteWithName(absl::StrCat(i), code, rewrites);
if (!config.fetch(i).name().empty()) {
*methods += RewriteWithName("_" + config.fetch(i).name(), code, rewrites);
}
}
return absl::OkStatus();
}
Status GenVariableMethods(const tf2xla::Config& config,
const xla::ProgramShapeProto& ps, string* methods) {
const int num_args = ps.parameters_size();
for (int i = config.feed_size(); i < num_args; ++i) {
std::vector<std::pair<string, string>> rewrites;
TF_RETURN_IF_ERROR(
AddRewritesForShape(i, xla::Shape(ps.parameters(i)), &rewrites));
const string code = R"(
void set_var_{{NAME}}_data({{MAYBE_CONST}}{{TYPE}}* data) {
set_arg_data({{I}}, data);
}
{{MAYBE_CONST}}{{TYPE}}* var_{{NAME}}_data() {
return static_cast<{{MAYBE_CONST}}{{TYPE}}*>(arg_data({{I}}));
}
{{MAYBE_CONST}}{{TYPE}}& var_{{NAME}}({{DIM_VARS}}) {
return (*static_cast<{{MAYBE_CONST}}{{TYPE}}(*){{DIM_SIZES}}>(
arg_data({{I}}))){{INDICES}};
}
const {{TYPE}}* var_{{NAME}}_data() const {
return static_cast<const {{TYPE}}*>(arg_data({{I}}));
}
const {{TYPE}}& var_{{NAME}}({{DIM_VARS}}) const {
return (*static_cast<const {{TYPE}}(*){{DIM_SIZES}}>(
arg_data({{I}}))){{INDICES}};
}
int var_{{NAME}}_size() const {
return {{COUNT}} * sizeof({{TYPE}});
}
int var_{{NAME}}_count() const {
return {{COUNT}};
}
)";
const tf2xla::Variable& var = config.variable(i - config.feed_size());
rewrites.emplace_back("{{MAYBE_CONST}}", var.readonly() ? "const " : "");
*methods += RewriteWithName(
var.name().empty() ? var.node_name() : var.name(), code, rewrites);
}
return absl::OkStatus();
}
Status GenArgShapeInfos(const xla::ProgramShapeProto& ps, string* infos) {
for (int i = 0; i < ps.parameters_size(); ++i) {
const xla::ShapeProto& shape = ps.parameters(i);
if (shape.element_type() == xla::TUPLE) {
return absl::InternalError(
absl::StrCat("parameter ", i,
": codegen requires XLA parameters to "
"be non-tuples."));
}
*infos += absl::Substitute(R"( static constexpr int32_t kArg$0Shapes[] = {
$1
};
)",
i,
shape.dimensions_size() > 0
? absl::StrJoin(shape.dimensions(), ", ")
: "-1");
}
*infos += R"( static const ShapeInfo* ArgShapeInfos() {
static constexpr ShapeInfo kArgShapeInfoTable[kNumArgs] = {
)";
for (int i = 0; i < ps.parameters_size(); ++i) {
const xla::ShapeProto& shape = ps.parameters(i);
*infos +=
absl::Substitute("{ kArg$0Shapes, $1 },\n", i, shape.dimensions_size());
}
*infos += R"( };
return kArgShapeInfoTable;
})";
return absl::OkStatus();
}
Status GenResultShapeInfos(const xla::ProgramShapeProto& ps, string* infos) {
if (ps.result().element_type() != xla::TUPLE) {
return absl::InternalError("codegen requires the XLA result to be a tuple");
}
for (int i = 0; i < ps.result().tuple_shapes_size(); ++i) {
const xla::ShapeProto& shape = ps.result().tuple_shapes(i);
*infos += absl::Substitute(
R"( static constexpr int32_t kResult$0Shapes[] = {
$1
};
)",
i,
shape.dimensions_size() > 0 ? absl::StrJoin(shape.dimensions(), ", ")
: "-1");
}
*infos += R"( static const ShapeInfo* ResultShapeInfos() {
static constexpr ShapeInfo kResultShapeInfoTable[kNumResults] = {
)";
for (int i = 0; i < ps.result().tuple_shapes_size(); ++i) {
const xla::ShapeProto& shape = ps.result().tuple_shapes(i);
*infos += absl::Substitute("{ kResult$0Shapes, $1 },\n", i,
shape.dimensions_size());
}
*infos += R"( };
return kResultShapeInfoTable;
})";
return absl::OkStatus();
}
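// Emits the body of a name-lookup method: a nullptr-terminated kNames table
// whose position encodes the index, with trailing unnamed entries trimmed.
// E.g. entries named {"a", "", "b"} produce {"a", "", "b", nullptr}, while
// {"a", "", ""} produce {"a", nullptr}. When `generate` is false the body
// simply returns nullptr.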
template <typename T>
string GenNameToIndexCode(const T& entries, bool generate) {
if (!generate) {
return "{\n return nullptr;\n }";
}
int end = entries.size();
for (int i = entries.size() - 1; i >= 0; --i) {
if (!entries[i].name().empty()) {
break;
}
end = i;
}
string code = "{\n static const char* kNames[] = {";
for (int i = 0; i < end; ++i) {
if (i > 0) {
code += ", ";
}
code += "\"";
code += entries[i].name();
code += "\"";
}
if (end > 0) {
code += ", ";
}
code += "nullptr};\n return kNames;\n }";
return code;
}
Status ValidateFeedFetchCppNames(const tf2xla::Config& config) {
for (const tf2xla::Feed& feed : config.feed()) {
if (!feed.name().empty()) {
TF_RETURN_IF_ERROR(ValidateCppIdent(feed.name(), "feed name"));
}
}
for (const tf2xla::Fetch& fetch : config.fetch()) {
if (!fetch.name().empty()) {
TF_RETURN_IF_ERROR(ValidateCppIdent(fetch.name(), "fetch name"));
}
}
for (const tf2xla::Variable& variable : config.variable()) {
if (!variable.name().empty()) {
TF_RETURN_IF_ERROR(ValidateCppIdent(variable.name(), "variable name"));
} else {
TF_RETURN_IF_ERROR(
ValidateCppIdent(variable.node_name(), "variable name"));
}
}
return absl::OkStatus();
}
std::vector<string> BufferInfosToCppExpression(
const std::vector<BufferInfo>& buffer_infos) {
std::vector<string> buffer_infos_as_strings;
std::transform(buffer_infos.begin(), buffer_infos.end(),
std::back_inserter(buffer_infos_as_strings),
[](const BufferInfo& buffer_info) {
xla::cpu_function_runtime::EncodedBufferInfo encoded =
buffer_info.Encode();
auto param_to_str = [](uint32_t param) -> std::string {
return param == ~0U ? "~0U" : absl::StrCat(param, "U");
};
return absl::StrCat(
"::xla::cpu_function_runtime::BufferInfo("
"::xla::cpu_function_runtime::EncodedBufferInfo{",
encoded.packed_kind_and_size, "ULL, ",
param_to_str(encoded.entry_param_number), ", ",
param_to_str(encoded.result_param_number), "})");
});
return buffer_infos_as_strings;
}
Status CheckEqual(size_t a, size_t b, absl::string_view error_msg) {
if (a != b) {
return absl::InternalError(
absl::StrCat(error_msg, ". Expected ", a, ", got ", b, "."));
}
return absl::OkStatus();
}
}
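// Generates the compiled-model header: validates the config and C++ names,
// derives buffer/arg/result index tables from the AOT compilation result,
// renders the accessor methods and shape tables, and substitutes everything
// into the class template below.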
Status GenerateHeader(const CodegenOpts& opts, const tf2xla::Config& config,
const CompileResult& compile_result,
const MetadataResult& metadata_result, string* header) {
TF_RETURN_IF_ERROR(ValidateConfig(config));
TF_RETURN_IF_ERROR(ValidateFeedFetchCppNames(config));
const int64_t result_index = compile_result.aot->result_buffer_index();
const std::vector<BufferInfo>& buffer_infos =
compile_result.aot->buffer_infos();
const std::vector<int32> arg_index_table =
::xla::cpu::CreateArgIndexTableFromBufferInfos(buffer_infos);
const std::vector<int32> result_index_table =
::xla::cpu::CreateResultIndexTableFromBufferInfos(buffer_infos);
std::vector<string> buffer_infos_as_strings =
BufferInfosToCppExpression(buffer_infos);
const int64_t buffer_infos_size = buffer_infos.size();
if (result_index < 0 || result_index >= buffer_infos_size) {
return errors::InvalidArgument("result index: ", result_index,
" is outside the range of temp sizes: [0,",
buffer_infos.size(), ")");
}
std::vector<BufferInfo> buffer_infos_for_args =
ExtractEntryParamBufferInfos(buffer_infos);
std::vector<BufferInfo> buffer_infos_for_temps =
ExtractTempBufferInfos(buffer_infos);
const xla::ProgramShapeProto& ps = compile_result.program_shape;
string methods_arg, methods_result, methods_variable;
TF_RETURN_IF_ERROR(GenArgMethods(config, ps, compile_result, &methods_arg));
TF_RETURN_IF_ERROR(GenResultMethods(config, ps, &methods_result));
TF_RETURN_IF_ERROR(GenVariableMethods(config, ps, &methods_variable));
string arg_shape_infos, result_shape_infos;
TF_RETURN_IF_ERROR(GenArgShapeInfos(ps, &arg_shape_infos));
TF_RETURN_IF_ERROR(
CheckEqual(ps.parameters_size(), arg_index_table.size(),
"Arg number mismatch, proto vs. arg_index_table"));
TF_RETURN_IF_ERROR(GenResultShapeInfos(ps, &result_shape_infos));
TF_RETURN_IF_ERROR(
CheckEqual(ps.result().tuple_shapes_size(), result_index_table.size(),
"Result number mismatch, proto vs. result_index_table"));
const size_t arg_bytes_aligned =
xla::cpu_function_runtime::AlignedBufferBytes(
buffer_infos_for_args.data(), buffer_infos_for_args.size(),
true);
const size_t arg_bytes_total = TotalBufferBytes(buffer_infos_for_args);
const size_t temp_bytes_aligned =
xla::cpu_function_runtime::AlignedBufferBytes(
buffer_infos_for_temps.data(), buffer_infos_for_temps.size(),
true);
const size_t temp_bytes_total = TotalBufferBytes(buffer_infos_for_temps);
string ns_start;
for (const string& n : opts.namespaces) {
ns_start += absl::StrCat("namespace ", n, " {\n");
}
ns_start += "\n";
string ns_end("\n");
for (int i = opts.namespaces.size() - 1; i >= 0; --i) {
const string& n = opts.namespaces[i];
ns_end += absl::StrCat("}
}
const string arg_names_code =
GenNameToIndexCode(config.feed(), opts.gen_name_to_index);
auto variable_copy = config.variable();
for (auto& var : variable_copy) {
if (var.name().empty()) {
var.set_name(var.node_name());
}
}
const string variable_names_code =
GenNameToIndexCode(variable_copy, opts.gen_name_to_index);
const string result_names_code =
GenNameToIndexCode(config.fetch(), opts.gen_name_to_index);
const string include_xla_data_proto =
opts.gen_program_shape
? R"(#include "xla/xla_data.pb.h")"
: "";
const string include_hlo_profile_printer_data_proto =
opts.gen_hlo_profile_printer_data
? R"(#include "xla/service/hlo_profile_printer_data.pb.h")"
: "";
const string assign_profile_counters_size =
opts.gen_hlo_profile_printer_data
? "set_static_data_profile_counters_size(data, "
"get_static_data_hlo_profile_printer_data(data)->"
"profile_counters_size());"
: "";
*header =
R"(
#ifndef TFCOMPILE_GENERATED_{{ENTRY}}_H_
#define TFCOMPILE_GENERATED_{{ENTRY}}_H_
{{INCLUDE_XLA_DATA_PROTO}}
{{INCLUDE_HLO_PROFILE_PRINTER_DATA_PROTO}}
#include "tensorflow/compiler/tf2xla/xla_compiled_cpu_function.h"
#include "tensorflow/core/platform/types.h"
namespace Eigen { struct ThreadPoolDevice; }
namespace xla { class ExecutableRunOptions; }
extern "C" void {{ENTRY}}(
void* result, const ::xla::ExecutableRunOptions* run_options,
const void** args, void** temps, XlaCustomCallStatus* status,
int64_t* profile_counters);
{{DECLS_FROM_OBJ_FILE}}
{{NS_START}}
class {{CLASS}} final : public tensorflow::XlaCompiledCpuFunction {
public:
static constexpr size_t kNumArgs = {{ARG_NUM}};
static constexpr size_t kNumResults = {{RESULT_NUM}};
static constexpr size_t kNumVariables = {{VARIABLE_NUM}};
static const ::int64_t ArgSize(::tensorflow::int32 index) {
return BufferInfos()[ArgIndexToBufferIndex()[index]].size();
}
static const tensorflow::XlaCompiledCpuFunction::StaticData& StaticData() {
static XlaCompiledCpuFunction::StaticData* kStaticData = [](){
XlaCompiledCpuFunction::StaticData* data =
new XlaCompiledCpuFunction::StaticData;
set_static_data_raw_function(data, {{ENTRY}});
set_static_data_buffer_infos(data, BufferInfos());
set_static_data_num_buffers(data, kNumBuffers);
set_static_data_result_index_table(data, ResultIndexToBufferIndex());
set_static_data_num_results(data, kNumResults);
set_static_data_arg_index_table(data, ArgIndexToBufferIndex());
set_static_data_num_args(data, kNumArgs);
set_static_data_num_variables(data, kNumVariables);
set_static_data_result_index(data, kResultIndex);
set_static_data_arg_shape_infos(data, ArgShapeInfos());
set_static_data_result_shape_infos(data, ResultShapeInfos());
set_static_data_arg_names(data, StaticArgNames());
set_static_data_variable_names(data, StaticVariableNames());
set_static_data_result_names(data, StaticResultNames());
set_static_data_program_shape(data, StaticProgramShape());
set_static_data_hlo_profile_printer_data(
data, StaticHloProfilePrinterData());
{{ASSIGN_PROFILE_COUNTERS_SIZE}}
return data;
}();
return *kStaticData;
}
{{CLASS}}(AllocMode alloc_mode =
AllocMode::ARGS_VARIABLES_RESULTS_PROFILES_AND_TEMPS)
: XlaCompiledCpuFunction(StaticData(), alloc_mode) {}
{{CLASS}}(const {{CLASS}}&) = delete;
{{CLASS}}& operator=(const {{CLASS}}&) = delete;
{{METHODS_ARG}}
{{METHODS_RESULT}} | #include "tensorflow/compiler/aot/codegen.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/TargetSelect.h"
#include "xla/cpu_function_runtime.h"
#include "xla/shape_util.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tfcompile {
namespace {
using ::xla::cpu_function_runtime::BufferInfo;
void ExpectErrorContains(const Status& status, absl::string_view str) {
EXPECT_NE(absl::OkStatus(), status);
EXPECT_TRUE(absl::StrContains(status.message(), str))
<< "expected error: " << status.message() << " to contain: " << str;
}
TEST(ValidateCppIdent, Simple) {
TF_EXPECT_OK(ValidateCppIdent("a", ""));
TF_EXPECT_OK(ValidateCppIdent("abc", ""));
TF_EXPECT_OK(ValidateCppIdent("_abc", ""));
TF_EXPECT_OK(ValidateCppIdent("_abc123", ""));
string ident;
for (char c = 'a'; c <= 'z'; c++) {
ident.append(1, c);
}
for (char c = 'A'; c <= 'Z'; c++) {
ident.append(1, c);
}
for (char c = '0'; c <= '9'; c++) {
ident.append(1, c);
}
ident += "_";
TF_EXPECT_OK(ValidateCppIdent(ident, ""));
ExpectErrorContains(ValidateCppIdent("", ""), "empty identifier");
ExpectErrorContains(ValidateCppIdent(" ", ""), "illegal leading char");
ExpectErrorContains(ValidateCppIdent("0", ""), "illegal leading char");
ExpectErrorContains(ValidateCppIdent(".", ""), "illegal leading char");
ExpectErrorContains(ValidateCppIdent(":", ""), "illegal leading char");
ExpectErrorContains(ValidateCppIdent("a.", ""), "illegal char");
ExpectErrorContains(ValidateCppIdent("a:", ""), "illegal char");
ExpectErrorContains(ValidateCppIdent("a:", ""), "illegal char");
}
class ParseCppClassTest : public ::testing::Test {
protected:
void ExpectOK(const string& cpp_class, const string& want_class_name,
const std::vector<string>& want_namespaces) {
string class_name;
std::vector<string> namespaces;
TF_EXPECT_OK(ParseCppClass(cpp_class, &class_name, &namespaces));
EXPECT_EQ(class_name, want_class_name);
EXPECT_EQ(namespaces, want_namespaces);
}
void ExpectFail(const string& cpp_class) {
string class_name;
std::vector<string> namespaces;
EXPECT_NE(ParseCppClass(cpp_class, &class_name, &namespaces),
absl::OkStatus())
<< cpp_class;
}
};
TEST_F(ParseCppClassTest, ParseOK) {
ExpectOK("MyClass", "MyClass", {});
ExpectOK("_MyClass", "_MyClass", {});
ExpectOK("a::MyClass", "MyClass", {"a"});
ExpectOK("a::foo::MyClass", "MyClass", {"a", "foo"});
ExpectOK("a::foo::b::MyClass", "MyClass", {"a", "foo", "b"});
ExpectOK("a::foo::b::bar::MyClass", "MyClass", {"a", "foo", "b", "bar"});
ExpectOK("foo::MyClass", "MyClass", {"foo"});
ExpectOK("_foo::MyClass", "MyClass", {"_foo"});
ExpectOK("_foo::_MyClass", "_MyClass", {"_foo"});
ExpectOK("::foo::bar::MyClass", "MyClass", {"foo", "bar"});
ExpectOK("::_foo::MyClass", "MyClass", {"_foo"});
ExpectOK("::_foo::_MyClass", "_MyClass", {"_foo"});
string ident;
for (char c = 'a'; c <= 'z'; c++) {
ident.append(1, c);
}
for (char c = 'A'; c <= 'Z'; c++) {
ident.append(1, c);
}
for (char c = '0'; c <= '9'; c++) {
ident.append(1, c);
}
ident += "_";
ExpectOK(ident, ident, {});
ExpectOK(ident + "::" + ident, ident, {ident});
ExpectOK(ident + "::" + ident + "::" + ident, ident, {ident, ident});
}
TEST_F(ParseCppClassTest, ParseFail) {
ExpectFail("");
ExpectFail("::");
ExpectFail("0");
ExpectFail("a.b");
ExpectFail("a:b");
ExpectFail(":foo::bar");
ExpectFail("good::.bad");
ExpectFail("good:::bad");
ExpectFail("good::bad::");
ExpectFail("good::::bad");
ExpectFail("::::bad");
ExpectFail("good:: bad");
ExpectFail("good::0bad");
}
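// Compares `expected_contents` against a golden file, optionally stripping
// carriage returns so the check is insensitive to Windows line endings. Flip
// `update_golden` to regenerate the golden file in place.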
static void CompareWithGoldenFile(
const string& tensorflow_relative_golden_file_name,
const string& expected_contents, bool ignore_cr) {
string sanitized_expected_contents(expected_contents);
if (ignore_cr) {
sanitized_expected_contents.erase(
std::remove(sanitized_expected_contents.begin(),
sanitized_expected_contents.end(), '\r'),
sanitized_expected_contents.end());
}
const bool update_golden = false;
string golden_file_name =
GetDataDependencyFilepath(tensorflow_relative_golden_file_name);
if (update_golden) {
TF_EXPECT_OK(
WriteStringToFile(Env::Default(), golden_file_name, expected_contents));
}
string golden_file_contents;
TF_ASSERT_OK(ReadFileToString(Env::Default(), golden_file_name,
&golden_file_contents));
if (ignore_cr) {
golden_file_contents.erase(std::remove(golden_file_contents.begin(),
golden_file_contents.end(), '\r'),
golden_file_contents.end());
}
EXPECT_EQ(golden_file_contents, expected_contents);
}
#if TF_LLVM_X86_AVAILABLE
TEST(CodegenTest, Golden) {
LLVMInitializeX86Target();
LLVMInitializeX86TargetInfo();
LLVMInitializeX86TargetMC();
LLVMInitializeX86AsmPrinter();
CodegenOpts opts;
opts.class_name = "MyClass";
opts.target_triple = "x86_64-pc-linux";
opts.namespaces = {"foo", "bar"};
opts.gen_name_to_index = true;
opts.gen_program_shape = true;
tf2xla::Config config;
tf2xla::Feed* feed = config.add_feed();
feed->mutable_id()->set_node_name("feed0");
feed->set_name("myfeed");
feed = config.add_feed();
feed->mutable_id()->set_node_name("feed1");
tf2xla::Fetch* fetch = config.add_fetch();
fetch->mutable_id()->set_node_name("fetch0");
fetch->set_name("myfetch");
tf2xla::Variable* variable = config.add_variable();
variable->set_node_name("myvar_readonly");
variable->mutable_shape()->add_dim()->set_size(1);
variable->set_type(DT_FLOAT);
variable->set_readonly(true);
tf2xla::Variable* variable2 = config.add_variable();
variable2->set_node_name("myvar");
variable2->mutable_shape()->add_dim()->set_size(1);
variable2->set_type(DT_FLOAT);
tf2xla::Variable* variable3 = config.add_variable();
variable3->set_node_name("my/var");
variable3->set_name("myvar2");
variable3->mutable_shape()->add_dim()->set_size(5);
variable3->set_type(DT_INT32);
CompileResult compile_result;
compile_result.aot.reset(new xla::cpu::CpuAotCompilationResult(
{},
{BufferInfo::MakeTempBuffer(3 * 8),
BufferInfo::MakeEntryParameter(8, 0),
BufferInfo::MakeTempBuffer(1),
BufferInfo::MakeEntryParameter(96, 1),
BufferInfo::MakeTempBuffer(1),
BufferInfo::MakeEntryParameter(96, 2),
BufferInfo::MakeTempBuffer(1),
BufferInfo::MakeEntryParameter(96, 3),
BufferInfo::MakeResultParameter(5 * 6 * 4,
0),
BufferInfo::MakeEntryParameter(96, 4),
BufferInfo::MakeResultParameter(1 * 4,
1),
BufferInfo::MakeResultParameter(5 * 4,
2)},
0, nullptr, {}));
compile_result.program_shape =
xla::ShapeUtil::MakeProgramShape(
{
xla::ShapeUtil::MakeShape(xla::F32, {1, 2}),
xla::ShapeUtil::MakeShape(xla::S64, {3, 4}),
xla::ShapeUtil::MakeShape(xla::F32, {1}),
xla::ShapeUtil::MakeShape(xla::F32, {1}),
xla::ShapeUtil::MakeShape(xla::S32, {5}),
},
xla::ShapeUtil::MakeTupleShape({
xla::ShapeUtil::MakeShape(xla::U32, {5, 6}),
xla::ShapeUtil::MakeShape(xla::F32, {1}),
xla::ShapeUtil::MakeShape(xla::S32, {5}),
}))
.ToProto();
compile_result.entry_point = "entry_point";
compile_result.pointer_size = 8;
MetadataResult metadata_result;
TF_ASSERT_OK(GenerateMetadata(opts, compile_result, &metadata_result));
CompareWithGoldenFile("tensorflow/compiler/aot/codegen_test_o.golden",
metadata_result.object_file_data, false);
string header;
TF_ASSERT_OK(
GenerateHeader(opts, config, compile_result, metadata_result, &header));
CompareWithGoldenFile("tensorflow/compiler/aot/codegen_test_h.golden", header,
true);
}
#endif
}
}
} |
1,243 | cpp | tensorflow/tensorflow | benchmark | tensorflow/compiler/aot/benchmark.cc | tensorflow/lite/tools/benchmark/benchmark_test.cc | #ifndef TENSORFLOW_COMPILER_AOT_BENCHMARK_H_
#define TENSORFLOW_COMPILER_AOT_BENCHMARK_H_
#include <functional>
#include <string>
#include <vector>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace tfcompile {
namespace benchmark {
struct Options {
static constexpr int64_t kDefaultMicros = 3000000;
int64_t max_iters = 0;
int64_t max_micros = 0;
};
struct Stats {
std::vector<int64_t> per_iter_us;
int64_t total_us;
Stats() : total_us(0) { per_iter_us.reserve(5000); }
};
void DumpStatsToStdout(const Stats& stats);
typedef std::function<void()> BenchmarkFn;
void Benchmark(const Options& options, const BenchmarkFn& fn, Stats* stats);
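// A minimal usage sketch (hypothetical caller, not part of this library):
//
//   benchmark::Options opts;
//   opts.max_micros = 2000000;  // run for roughly two seconds
//   benchmark::Stats stats;
//   benchmark::Benchmark(opts, [] { /* invoke the compiled function */ },
//                        &stats);
//   benchmark::DumpStatsToStdout(stats);
//
// When both max_iters and max_micros are zero, Benchmark runs for
// Options::kDefaultMicros (three seconds).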
}
}
}
#endif
#include "tensorflow/compiler/aot/benchmark.h"
#include <sys/time.h>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace tfcompile {
namespace benchmark {
static uint64 NowMicros() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<uint64>(tv.tv_sec) * 1000000 + tv.tv_usec;
}
void DumpStatsToStdout(const Stats& stats) {
std::vector<int64_t> sorted_us(stats.per_iter_us);
std::sort(sorted_us.begin(), sorted_us.end());
const size_t count_us = sorted_us.size();
double sum_us = 0;
size_t count_us_trimmed = 0;
double sum_us_trimmed = 0;
size_t count_us_best = 0;
double sum_us_best = 0;
static constexpr float trim_ratio = 0.25;
static constexpr float best_ratio = 0.1;
const size_t count_trimmed = count_us * trim_ratio;
const size_t count_best = count_us * best_ratio;
for (size_t i = 0; i < sorted_us.size(); ++i) {
const int64_t us = sorted_us[i];
sum_us += us;
if (i >= count_trimmed && i < count_us - count_trimmed) {
sum_us_trimmed += us;
++count_us_trimmed;
}
if (i < count_best) {
sum_us_best += us;
++count_us_best;
}
}
const int kBufSize = 1000;
char buf[kBufSize];
snprintf(buf, kBufSize, "Mean with %2.0f%% trimmed:", trim_ratio * 100);
std::string label_trimmed(buf);
snprintf(buf, kBufSize, "Mean of %2.0f%% best:", best_ratio * 100);
std::string label_best(buf);
std::vector<std::pair<std::string, double>> groups = {
{"Best:", sorted_us.front()},
{"Worst:", sorted_us.back()},
{"Median:", sorted_us[count_us / 2]},
{"Mean:", sum_us / count_us},
{std::move(label_trimmed), sum_us_trimmed / count_us_trimmed},
{std::move(label_best), sum_us_best / count_us_best},
};
int max_label_size = 0;
double max_us = 0;
for (const auto& g : groups) {
if (g.first.size() > max_label_size) {
max_label_size = g.first.size();
}
if (g.second > max_us) {
max_us = g.second;
}
}
int max_digits = 1;
while (max_us >= 10.0) {
max_us /= 10.0;
++max_digits;
}
printf("Benchmark ran %zu iterations over %lld us\n", count_us,
static_cast<long long>(stats.total_us));
for (const auto& g : groups) {
printf(" %-*s %*.3f us\n", max_label_size, g.first.c_str(), max_digits + 4,
g.second);
}
}
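// A note on the statistics printed above: with trim_ratio = 0.25 the
// trimmed mean discards the fastest and slowest 25% of iterations (for 8
// sorted samples, only indices 2..5 are averaged), and with best_ratio =
// 0.1 the "best" mean averages only the fastest 10%. Both damp scheduler
// and cache noise in wall-clock microbenchmarks.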
void Benchmark(const Options& options, const BenchmarkFn& fn, Stats* stats) {
const int64_t max_us = (options.max_micros <= 0 && options.max_iters <= 0)
? Options::kDefaultMicros
: options.max_micros;
printf("Running benchmark for %lld us\n", static_cast<long long>(max_us));
const int64_t start_us = NowMicros();
int64_t iters = 0;
while (true) {
const int64_t iter_start_us = NowMicros();
fn();
const int64_t end_us = NowMicros();
stats->per_iter_us.push_back(end_us - iter_start_us);
const int64_t total_us = end_us - start_us;
++iters;
if ((max_us > 0 && total_us >= max_us) ||
(options.max_iters > 0 && iters >= options.max_iters)) {
stats->total_us = total_us;
break;
}
}
}
}
}
} | #include "tensorflow/compiler/aot/benchmark.h"
#include "tensorflow/compiler/aot/test_graph_tfadd.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tfcompile {
namespace benchmark {
namespace {
TEST(Benchmark, Benchmark) {
AddComp add;
Options options;
options.max_iters = 1;
Stats stats1;
Benchmark(options, [&] { add.Run(); }, &stats1);
EXPECT_EQ(stats1.per_iter_us.size(), 1);
options.max_iters = 5;
Stats stats5;
Benchmark(options, [&] { add.Run(); }, &stats5);
EXPECT_EQ(stats5.per_iter_us.size(), 5);
}
}
}
}
} |
1,244 | cpp | tensorflow/tensorflow | logging | third_party/xla/third_party/tsl/tsl/platform/default/logging.cc | third_party/xla/third_party/tsl/tsl/platform/logging_test.cc | #if defined(_WIN32)
#pragma warning(disable : 4716)
#endif
#ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_LOGGING_H_
#define TENSORFLOW_TSL_PLATFORM_DEFAULT_LOGGING_H_
#include <atomic>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include "absl/base/log_severity.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/types.h"
#undef ERROR
#undef LOG
#undef LOG_EVERY_N
#undef LOG_FIRST_N
#undef LOG_EVERY_POW_2
#undef LOG_EVERY_N_SEC
#undef VLOG
#undef CHECK
#undef CHECK_EQ
#undef CHECK_NE
#undef CHECK_LT
#undef CHECK_LE
#undef CHECK_GT
#undef CHECK_GE
#undef DCHECK
#undef DCHECK_EQ
#undef DCHECK_NE
#undef DCHECK_LT
#undef DCHECK_LE
#undef DCHECK_GT
#undef DCHECK_GE
#undef QCHECK
#undef QCHECK_EQ
#undef QCHECK_NE
#undef QCHECK_LT
#undef QCHECK_LE
#undef QCHECK_GT
#undef QCHECK_GE
#undef PCHECK
namespace tsl {
const int INFO = 0;
const int WARNING = 1;
const int ERROR = 2;
const int FATAL = 3;
const int NUM_SEVERITIES = 4;
namespace internal {
void LogString(const char* fname, int line, int severity,
const std::string& message);
class LogMessage : public std::basic_ostringstream<char> {
public:
LogMessage(const char* fname, int line, int severity);
~LogMessage() override;
LogMessage& AtLocation(const char* fname, int line);
static int64_t MaxVLogLevel();
static bool VmoduleActivated(const char* fname, int level);
protected:
void GenerateLogMessage();
private:
const char* fname_;
int line_;
int severity_;
};
struct Voidifier {
template <typename T>
void operator&(const T&) const {}
};
class LogMessageFatal : public LogMessage {
public:
LogMessageFatal(const char* file, int line) TF_ATTRIBUTE_COLD;
TF_ATTRIBUTE_NORETURN ~LogMessageFatal() override;
};
class LogMessageNull : public std::basic_ostringstream<char> {
public:
LogMessageNull() {}
~LogMessageNull() override {}
};
#define _TF_LOG_INFO \
::tsl::internal::LogMessage(__FILE__, __LINE__, ::tsl::INFO)
#define _TF_LOG_WARNING \
::tsl::internal::LogMessage(__FILE__, __LINE__, ::tsl::WARNING)
#define _TF_LOG_ERROR \
::tsl::internal::LogMessage(__FILE__, __LINE__, ::tsl::ERROR)
#define _TF_LOG_FATAL ::tsl::internal::LogMessageFatal(__FILE__, __LINE__)
#define _TF_LOG_QFATAL _TF_LOG_FATAL
#ifdef NDEBUG
#define _TF_LOG_DFATAL _TF_LOG_ERROR
#else
#define _TF_LOG_DFATAL _TF_LOG_FATAL
#endif
#define LOG(severity) _TF_LOG_##severity
#ifdef IS_MOBILE_PLATFORM
#define VLOG_IS_ON(lvl) ((lvl) <= 0)
#else
#define VLOG_IS_ON(lvl) \
(([](int level, const char* fname) { \
static const bool vmodule_activated = \
::tsl::internal::LogMessage::VmoduleActivated(fname, level); \
return vmodule_activated; \
})(lvl, __FILE__))
#endif
#define VLOG(level) \
TF_PREDICT_TRUE(!VLOG_IS_ON(level)) \
? (void)0 \
: ::tsl::internal::Voidifier() & \
::tsl::internal::LogMessage(__FILE__, __LINE__, tsl::INFO)
#ifndef NDEBUG
#define DVLOG VLOG
#else
#define DVLOG(verbose_level) \
while (false && (verbose_level) > 0) ::tsl::internal::LogMessageNull()
#endif
class LogEveryNState {
public:
bool ShouldLog(int n);
uint32_t counter() { return counter_.load(std::memory_order_relaxed); }
private:
std::atomic<uint32> counter_{0};
};
class LogFirstNState {
public:
bool ShouldLog(int n);
uint32 counter() { return counter_.load(std::memory_order_relaxed); }
private:
std::atomic<uint32> counter_{0};
};
class LogEveryPow2State {
public:
bool ShouldLog(int ignored);
uint32 counter() { return counter_.load(std::memory_order_relaxed); }
private:
std::atomic<uint32> counter_{0};
};
class LogEveryNSecState {
public:
bool ShouldLog(double seconds);
uint32 counter() { return counter_.load(std::memory_order_relaxed); }
private:
std::atomic<uint32> counter_{0};
std::atomic<int64_t> next_log_time_cycles_{0};
};
#define LOGGING_INTERNAL_STATEFUL_CONDITION(kind, condition, arg) \
for (bool logging_internal_stateful_condition_do_log(condition); \
logging_internal_stateful_condition_do_log; \
logging_internal_stateful_condition_do_log = false) \
for (static ::tsl::internal::Log##kind##State \
logging_internal_stateful_condition_state; \
logging_internal_stateful_condition_do_log && \
logging_internal_stateful_condition_state.ShouldLog(arg); \
logging_internal_stateful_condition_do_log = false) \
for (const uint32_t COUNTER ABSL_ATTRIBUTE_UNUSED = \
logging_internal_stateful_condition_state.counter(); \
logging_internal_stateful_condition_do_log; \
logging_internal_stateful_condition_do_log = false)
#define LOG_EVERY_N(severity, n) \
LOGGING_INTERNAL_STATEFUL_CONDITION(EveryN, true, n) \
LOG(severity)
#define LOG_FIRST_N(severity, n) \
LOGGING_INTERNAL_STATEFUL_CONDITION(FirstN, true, n) \
LOG(severity)
#define LOG_EVERY_POW_2(severity) \
LOGGING_INTERNAL_STATEFUL_CONDITION(EveryPow2, true, 0) \
LOG(severity)
#define LOG_EVERY_N_SEC(severity, n_seconds) \
LOGGING_INTERNAL_STATEFUL_CONDITION(EveryNSec, true, n_seconds) \
LOG(severity)
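// How the LOG_EVERY_N / LOG_FIRST_N / LOG_EVERY_POW_2 / LOG_EVERY_N_SEC
// macros above work: LOGGING_INTERNAL_STATEFUL_CONDITION expands to a chain
// of single-iteration for-loops whose middle loop owns a function-local
// static Log*State, so every textual occurrence of the macro gets its own
// counter, and the trailing LOG(severity) becomes the loop body. A minimal
// usage sketch:
//
//   for (int i = 0; i < 1000; ++i) {
//     LOG_EVERY_N(INFO, 100) << "processed " << i;  // logs ~10 times
//   }
//
// Inside the logged statement, COUNTER names how often this site was hit.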
#define CHECK(condition) \
if (TF_PREDICT_FALSE(!(condition))) \
LOG(FATAL) << "Check failed: " #condition " "
template <typename T>
inline const T& GetReferenceableValue(const T& t) {
return t;
}
inline char GetReferenceableValue(char t) { return t; }
inline unsigned char GetReferenceableValue(unsigned char t) { return t; }
inline signed char GetReferenceableValue(signed char t) { return t; }
inline int16 GetReferenceableValue(int16_t t) { return t; }
inline uint16 GetReferenceableValue(uint16 t) { return t; }
inline int GetReferenceableValue(int t) { return t; }
inline unsigned int GetReferenceableValue(unsigned int t) { return t; }
inline int64_t GetReferenceableValue(int64_t t) { return t; }
inline uint64 GetReferenceableValue(uint64 t) { return t; }
template <typename T>
inline void MakeCheckOpValueString(std::ostream* os, const T& v) {
(*os) << v;
}
template <>
void MakeCheckOpValueString(std::ostream* os, const char& v);
template <>
void MakeCheckOpValueString(std::ostream* os, const signed char& v);
template <>
void MakeCheckOpValueString(std::ostream* os, const unsigned char& v);
#if LANG_CXX11
template <>
void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t& v);
#endif
struct CheckOpString {
explicit CheckOpString(string* str) : str_(str) {}
explicit operator bool() const { return TF_PREDICT_FALSE(str_ != nullptr); }
string* str_;
};
template <typename T1, typename T2>
string* MakeCheckOpString(const T1& v1, const T2& v2,
const char* exprtext) TF_ATTRIBUTE_NOINLINE;
class CheckOpMessageBuilder {
public:
explicit CheckOpMessageBuilder(const char* exprtext);
~CheckOpMessageBuilder();
std::ostream* ForVar1() { return stream_; }
std::ostream* ForVar2();
string* NewString();
private:
std::ostringstream* stream_;
};
template <typename T1, typename T2>
string* MakeCheckOpString(const T1& v1, const T2& v2, const char* exprtext) {
CheckOpMessageBuilder comb(exprtext);
MakeCheckOpValueString(comb.ForVar1(), v1);
MakeCheckOpValueString(comb.ForVar2(), v2);
return comb.NewString();
}
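// Example of the failure text produced here: a failing CHECK_EQ(x, y) with
// x == 3 and y == 4 routes through Check_EQImpl to MakeCheckOpString, whose
// CheckOpMessageBuilder emits the expression text followed by both values,
// conventionally "x == y (3 vs. 4)" (the exact separators live in the
// builder's definition in the .cc file, not shown here).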
#define TF_DEFINE_CHECK_OP_IMPL(name, op) \
template <typename T1, typename T2> \
inline string* name##Impl(const T1& v1, const T2& v2, \
const char* exprtext) { \
if (TF_PREDICT_TRUE(v1 op v2)) \
return NULL; \
else \
return ::tsl::internal::MakeCheckOpString(v1, v2, exprtext); \
} \
inline string* name##Impl(int v1, int v2, const char* exprtext) { \
return name##Impl<int, int>(v1, v2, exprtext); \
}
TF_DEFINE_CHECK_OP_IMPL(Check_EQ, ==)
inline string* Check_EQImpl(int v1, size_t v2, const char* exprtext) {
  if (TF_PREDICT_FALSE(v1 < 0))
    return ::tsl::internal::MakeCheckOpString(v1, v2, exprtext);
return Check_EQImpl(size_t(v1), v2, exprtext);
}
inline string* Check_EQImpl(size_t v1, int v2, const char* exprtext) {
return Check_EQImpl(v2, v1, exprtext);
}
TF_DEFINE_CHECK_OP_IMPL(Check_NE, !=)
inline string* Check_NEImpl(int v1, size_t v2, const char* exprtext) {
if (v1 < 0) return NULL;
return Check_NEImpl(size_t(v1), v2, exprtext);
}
inline string* Check_NEImpl(size_t v1, int v2, const char* exprtext) {
return Check_NEImpl(v2, v1, exprtext);
}
TF_DEFINE_CHECK_OP_IMPL(Check_LE, <=)
inline string* Check_LEImpl(int v1, size_t v2, const char* exprtext) {
if (v1 <= 0) return NULL;
return Check_LEImpl(size_t(v1), v2, exprtext);
}
inline string* Check_LEImpl(size_t v1, int v2, const char* exprtext) {
if (TF_PREDICT_FALSE(v2 < 0))
return ::tsl::internal::MakeCheckOpString(v1, v2, exprtext);
return Check_LEImpl(v1, size_t(v2), exprtext);
}
TF_DEFINE_CHECK_OP_IMPL(Check_LT, <)
inline string* Check_LTImpl(int v1, size_t v2, const char* exprtext) {
if (v1 < 0) return NULL;
return Check_LTImpl(size_t(v1), v2, exprtext);
}
inline string* Check_LTImpl(size_t v1, int v2, const char* exprtext) {
if (v2 < 0) return ::tsl::internal::MakeCheckOpString(v1, v2, exprtext);
return Check_LTImpl(v1, size_t(v2), exprtext);
}
template <typename T1, typename T2>
inline string* Check_GEImpl(const T1& v1, const T2& v2, const char* exprtext) {
return Check_LEImpl(v2, v1, exprtext);
}
template <typename T1, typename T2>
inline string* Check_GTImpl(const T1& v1, const T2& v2, const char* exprtext) {
return Check_LTImpl(v2, v1, exprtext);
}
#undef TF_DEFINE_CHECK_OP_IMPL
#define CHECK_OP_LOG(name, op, val1, val2) \
while (::tsl::internal::CheckOpString _result{::tsl::internal::name##Impl( \
::tsl::internal::GetReferenceableValue(val1), \
::tsl::internal::GetReferenceableValue(val2), #val1 " " #op " " #val2)}) \
::tsl::internal::LogMessageFatal(__FILE__, __LINE__) << *(_result.str_)
#define CHECK_OP(name, op, val1, val2) CHECK_OP_LOG(name, op, val1, val2)
#define CHECK_EQ(val1, val2) CHECK_OP(Check_EQ, ==, val1, val2)
#define CHECK_NE(val1, val2) CHECK_OP(Check_NE, !=, val1, val2)
#define CHECK_LE(val1, val2) CHECK_OP(Check_LE, <=, val1, val2)
#define CHECK_LT(val1, val2) CHECK_OP(Check_LT, <, val1, val2)
#define CHECK_GE(val1, val2) CHECK_OP(Check_GE, >=, val1, val2)
#define CHECK_GT(val1, val2) CHECK_OP(Check_GT, >, val1, val2)
#define CHECK_NOTNULL(val) \
::tsl::internal::CheckNotNull(__FILE__, __LINE__, \
"'" #val "' Must be non NULL", (val))
#ifndef NDEBUG
#define DCHECK(condition) CHECK(condition)
#define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2) CHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2) CHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2) CHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2) CHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2) CHECK_GT(val1, val2)
#else
#define DCHECK(condition) \
while (false && (condition)) LOG(FATAL)
#define _TF_DCHECK_NOP(x, y) \
while (false && ((void)(x), (void)(y), 0)) LOG(FATAL)
#define DCHECK_EQ(x, y) _TF_DCHECK_NOP(x, y)
#define DCHECK_NE(x, y) _TF_DCHECK_NOP(x, y)
#define DCHECK_LE(x, y) _TF_DCHECK_NOP(x, y)
#define DCHECK_LT(x, y) _TF_DCHECK_NOP(x, y)
#define DCHECK_GE(x, y) _TF_DCHECK_NOP(x, y)
#define DCHECK_GT(x, y) _TF_DCHECK_NOP(x, y)
#endif
#define QCHECK(condition) CHECK(condition)
#define QCHECK_EQ(x, y) CHECK_EQ(x, y)
#define QCHECK_NE(x, y) CHECK_NE(x, y)
#define QCHECK_LE(x, y) CHECK_LE(x, y)
#define QCHECK_LT(x, y) CHECK_LT(x, y)
#define QCHECK_GE(x, y) CHECK_GE(x, y)
#define QCHECK_GT(x, y) CHECK_GT(x, y)
template <typename T>
T&& CheckNotNull(const char* file, int line, const char* exprtext, T&& t) {
if (t == nullptr) {
LogMessageFatal(file, line) << string(exprtext);
}
return std::forward<T>(t);
}
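// Because CheckNotNull perfectly forwards its argument, CHECK_NOTNULL can
// be used inline in expressions, e.g. (hypothetical):
//   int* p = CHECK_NOTNULL(AllocateInt());
// A null argument terminates the process via LogMessageFatal.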
int64_t MinLogLevelFromEnv();
int64_t MaxVLogLevelFromEnv();
}
class TFLogEntry {
static absl::LogSeverity AsAbslLogSeverity(int severity) {
return static_cast<absl::LogSeverity>(severity);
}
public:
explicit TFLogEntry(int severity, absl::string_view message)
: severity_(AsAbslLogSeverity(severity)), message_(message) {}
explicit TFLogEntry(int severity, absl::string_view fname, int line,
absl::string_view message)
: severity_(AsAbslLogSeverity(severity)),
fname_(fname),
line_(line),
message_(message) {}
absl::LogSeverity log_severity() const { return severity_; }
std::string FName() const { return fname_; }
int Line() const { return line_; }
std::string ToString() const { return message_; }
absl::string_view text_message() const { return message_; }
absl::string_view text_message_with_prefix() const { return message_; }
private:
const absl::LogSeverity severity_;
const std::string fname_;
int line_ = -1;
const std::string message_;
};
class TFLogSink {
public:
virtual ~TFLogSink() = default;
virtual void Send(const TFLogEntry& entry) = 0;
virtual void WaitTillSent() {}
};
class TFDefaultLogSink : public TFLogSink {
public:
void Send(const TFLogEntry& entry) override;
};
void TFAddLogSink(TFLogSink* sink);
void TFRemoveLogSink(TFLogSink* sink);
std::vector<TFLogSink*> TFGetLogSinks();
void UpdateLogVerbosityIfDefined(const char* env_var);
}
#endif
#include "tsl/platform/default/logging.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/sysinfo.h"
#include "tsl/platform/env_time.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/mutex.h"
#if defined(PLATFORM_POSIX_ANDROID)
#include <android/log.h>
#include <iostream>
#include <sstream>
#endif
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <algorithm>
#include <queue>
#include <unordered_map>
namespace tsl {
namespace internal {
namespace {
class TFLogSinks {
public:
static TFLogSinks& Instance();
void Add(TFLogSink* sink);
void Remove(TFLogSink* sink);
std::vector<TFLogSink*> GetSinks() const;
void Send(const TFLogEntry& entry);
private:
TFLogSinks();
void SendToSink(TFLogSink& sink, const TFLogEntry& entry);
std::queue<TFLogEntry> log_entry_queue_;
static const size_t kMaxLogEntryQueueSize = 128;
mutable tsl::mutex mutex_;
std::vector<TFLogSink*> sinks_;
};
TFLogSinks::TFLogSinks() {
#ifndef NO_DEFAULT_LOGGER
static TFDefaultLogSink* default_sink = new TFDefaultLogSink();
sinks_.emplace_back(default_sink);
#endif
}
TFLogSinks& TFLogSinks::Instance() {
static TFLogSinks* instance = new TFLogSinks();
return *instance;
}
void TFLogSinks::Add(TFLogSink* sink) {
assert(sink != nullptr && "The sink must not be a nullptr");
tsl::mutex_lock lock(mutex_);
sinks_.emplace_back(sink);
if (sinks_.size() == 1) {
while (!log_entry_queue_.empty()) {
for (const auto& sink : sinks_) {
SendToSink(*sink, log_entry_queue_.front());
}
log_entry_queue_.pop();
}
}
}
void TFLogSinks::Remove(TFLogSink* sink) {
assert(sink != nullptr && "The sink must not be a nullptr");
tsl::mutex_lock lock(mutex_);
auto it = std::find(sinks_.begin(), sinks_.end(), sink);
if (it != sinks_.end()) sinks_.erase(it);
}
std::vector<TFLogSink*> TFLogSinks::GetSinks() const {
tsl::mutex_lock lock(mutex_);
return sinks_;
}
void TFLogSinks::Send(const TFLogEntry& entry) {
tsl::mutex_lock lock(mutex_);
if (sinks_.empty()) {
while (log_entry_queue_.size() >= kMaxLogEntryQueueSize) {
log_entry_queue_.pop();
}
log_entry_queue_.push(entry);
return;
}
while (!log_entry_queue_.empty()) {
for (const auto& sink : sinks_) {
SendToSink(*sink, log_entry_queue_.front());
}
log_entry_queue_.pop();
}
for (const auto& sink : sinks_) {
SendToSink(*sink, entry);
}
}
void TFLogSinks::SendToSink(TFLogSink& sink, const TFLogEntry& entry) {
sink.Send(entry);
sink.WaitTillSent();
}
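// Buffering semantics: while no sink is registered, Send() queues up to
// kMaxLogEntryQueueSize (128) entries, dropping the oldest beyond that; the
// backlog is replayed once a sink is added or the next entry arrives. Note
// that SendToSink runs under mutex_, so a slow sink's Send/WaitTillSent can
// stall every logging thread.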
class VlogFileMgr {
public:
VlogFileMgr();
~VlogFileMgr();
FILE* FilePtr() const;
private:
FILE* vlog_file_ptr;
char* vlog_file_name;
};
VlogFileMgr::VlogFileMgr() {
vlog_file_name = getenv("TF_CPP_VLOG_FILENAME");
vlog_file_ptr =
vlog_file_name == nullptr ? nullptr : fopen(vlog_file_name, "w");
if (vlog_file_ptr == nullptr) {
vlog_file_ptr = stderr;
}
}
VlogFileMgr::~VlogFileMgr() {
if (vlog_file_ptr != stderr) {
fclose(vlog_file_ptr);
}
}
FILE* VlogFileMgr::FilePtr() const { return vlog_file_ptr; }
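// VlogFileMgr redirects log output to the file named by the
// TF_CPP_VLOG_FILENAME environment variable, falling back to stderr when
// the variable is unset or the file cannot be opened. Shell usage sketch:
//   TF_CPP_VLOG_FILENAME=/tmp/vlog.txt TF_CPP_MAX_VLOG_LEVEL=2 ./my_binary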
int ParseInteger(const char* str, size_t size) {
string integer_str(str, size);
std::istringstream ss(integer_str);
int level = 0;
ss >> level;
return level;
}
int64_t LogLevelStrToInt(const char* tf_env_var_val) {
if (tf_env_var_val == nullptr) {
return 0;
}
return ParseInteger(tf_env_var_val, strlen(tf_env_var_val));
}
struct StringData {
struct Hasher {
size_t operator()(const StringData& sdata) const {
size_t hash = 5381;
const char* data = sdata.data;
for (const char* top = data + sdata.size; data < top; ++data) {
hash = ((hash << 5) + hash) + (*data);
}
return hash;
}
};
StringData() = default;
StringData(const char* data, size_t size) : data(data), size(size) {}
bool operator==(const StringData& rhs) const {
return size == rhs.size && memcmp(data, rhs.data, size) == 0;
}
const char* data = nullptr;
size_t size = 0;
};
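// StringData::Hasher above is the classic djb2 hash (hash = hash * 33 + c,
// seeded with 5381). StringData itself is a non-owning (pointer, size)
// view, so map keys must outlive the map; below they point into the
// strdup'ed copy of the TF_CPP_VMODULE value.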
using VmoduleMap = std::unordered_map<StringData, int, StringData::Hasher>;
VmoduleMap* VmodulesMapFromEnv() {
const char* env = getenv("TF_CPP_VMODULE");
if (env == nullptr) {
return nullptr;
}
const char* env_data = strdup(env);
VmoduleMap* result = new VmoduleMap();
while (true) {
const char* eq = strchr(env_data, '=');
if (eq == nullptr) {
break;
}
const char* after_eq = eq + 1;
const char* comma = strchr(after_eq, ',');
const char* new_env_data;
if (comma == nullptr) {
comma = strchr(after_eq, '\0');
new_env_data = comma;
} else {
new_env_data = comma + 1;
}
(*result)[StringData(env_data, eq - env_data)] = | #include "tsl/platform/logging.h"
#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <memory>
#include <sstream>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/path.h"
#include "tsl/platform/stacktrace_handler.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#ifdef PLATFORM_WINDOWS
#define popen _popen
#define pclose _pclose
#endif
static char* program_name;
namespace tsl {
namespace {
using ::testing::HasSubstr;
using ::testing::Not;
TEST(Logging, Log) {
LOG(INFO) << "Hello";
LOG(INFO) << "Another log message";
LOG(ERROR) << "Error message";
VLOG(1) << "A VLOG message";
VLOG(2) << "A higher VLOG message";
DVLOG(1) << "A DVLOG message";
DVLOG(2) << "A higher DVLOG message";
}
TEST(Logging, CheckChecks) {
CHECK(true);
CHECK(7 > 5);
string a("abc");
string b("xyz");
CHECK_EQ(a, a);
CHECK_NE(a, b);
CHECK_EQ(3, 3);
CHECK_NE(4, 3);
CHECK_GT(4, 3);
CHECK_GE(3, 3);
CHECK_LT(2, 3);
CHECK_LE(2, 3);
DCHECK(true);
DCHECK(7 > 5);
DCHECK_EQ(a, a);
DCHECK_NE(a, b);
DCHECK_EQ(3, 3);
DCHECK_NE(4, 3);
DCHECK_GT(4, 3);
DCHECK_GE(3, 3);
DCHECK_LT(2, 3);
DCHECK_LE(2, 3);
}
TEST(LoggingDeathTest, FailedChecks) {
string a("abc");
string b("xyz");
const char* p_const = "hello there";
const char* p_null_const = nullptr;
char mybuf[10];
char* p_non_const = mybuf;
char* p_null = nullptr;
CHECK_NOTNULL(p_const);
CHECK_NOTNULL(p_non_const);
ASSERT_DEATH(CHECK(false), "false");
ASSERT_DEATH(CHECK(9 < 7), "9 < 7");
ASSERT_DEATH(CHECK_EQ(a, b), "a == b");
ASSERT_DEATH(CHECK_EQ(3, 4), "3 == 4");
ASSERT_DEATH(CHECK_NE(3, 3), "3 != 3");
ASSERT_DEATH(CHECK_GT(2, 3), "2 > 3");
ASSERT_DEATH(CHECK_GE(2, 3), "2 >= 3");
ASSERT_DEATH(CHECK_LT(3, 2), "3 < 2");
ASSERT_DEATH(CHECK_LE(3, 2), "3 <= 2");
ASSERT_DEATH(CHECK(false), "false");
ASSERT_DEATH(printf("%s", CHECK_NOTNULL(p_null)), "Must be non NULL");
ASSERT_DEATH(printf("%s", CHECK_NOTNULL(p_null_const)), "Must be non NULL");
#ifndef NDEBUG
ASSERT_DEATH(DCHECK(9 < 7), "9 < 7");
ASSERT_DEATH(DCHECK(9 < 7), "9 < 7");
ASSERT_DEATH(DCHECK_EQ(a, b), "a == b");
ASSERT_DEATH(DCHECK_EQ(3, 4), "3 == 4");
ASSERT_DEATH(DCHECK_NE(3, 3), "3 != 3");
ASSERT_DEATH(DCHECK_GT(2, 3), "2 > 3");
ASSERT_DEATH(DCHECK_GE(2, 3), "2 >= 3");
ASSERT_DEATH(DCHECK_LT(3, 2), "3 < 2");
ASSERT_DEATH(DCHECK_LE(3, 2), "3 <= 2");
#endif
}
TEST(InternalLogString, Basic) {
internal::LogString(__FILE__, __LINE__, INFO, "Hello there");
}
class TestSink : public TFLogSink {
public:
void Send(const TFLogEntry& entry) override {
ss_ << entry.text_message() << std::endl;
}
std::string Get() const { return ss_.str(); }
private:
std::stringstream ss_;
};
TEST(LogSinkTest, testLogSinks) {
const int sinks_initial_size = TFGetLogSinks().size();
TestSink sink;
TFAddLogSink(&sink);
EXPECT_EQ(TFGetLogSinks().size(), sinks_initial_size + 1);
LOG(INFO) << "Foo";
LOG(INFO) << "Bar";
EXPECT_EQ(sink.Get(), "Foo\nBar\n");
TFRemoveLogSink(&sink);
EXPECT_EQ(TFGetLogSinks().size(), sinks_initial_size);
}
std::string ReadFromFilePointer(FILE* fp) {
std::string result;
while (!feof(fp)) {
char buf[512];
size_t len = fread(buf, sizeof(buf[0]), 512, fp);
result.append(buf, len);
}
return result;
}
absl::StatusOr<std::string> ReadFromFile(const std::string& filename) {
std::shared_ptr<FILE> fp(fopen(filename.c_str(), "r"), fclose);
if (fp == nullptr) {
return absl::ErrnoToStatus(errno,
absl::StrFormat("Cannot fopen '%s'", filename));
}
return ReadFromFilePointer(fp.get());
}
class SubcommandTest : public ::testing::Test {
public:
static constexpr absl::string_view kLogVLog = "log_and_vlog";
static bool IsSubcommand(absl::string_view subcommand) {
return subcommand == kLogVLog;
}
static int Run(absl::string_view subcommand) {
CHECK_EQ(subcommand, kLogVLog);
LOG(INFO) << "LOG INFO";
LOG(WARNING) << "LOG WARNING";
LOG(ERROR) << "LOG ERROR";
LOG(INFO) << absl::StrFormat("VLOG_IS_ON(1)? %d", VLOG_IS_ON(1));
LOG(INFO) << absl::StrFormat("VLOG_IS_ON(2)? %d", VLOG_IS_ON(2));
LOG(INFO) << absl::StrFormat("VLOG_IS_ON(3)? %d", VLOG_IS_ON(3));
VLOG(1) << "VLevel 1";
VLOG(2) << "VLevel 2";
VLOG(3) << "VLevel 3";
return EXIT_SUCCESS;
}
protected:
absl::StatusOr<std::string> CaptureOutput(const char* invocation) {
std::shared_ptr<FILE> fp(popen(invocation, "r"), pclose);
if (fp == nullptr) {
return absl::ErrnoToStatus(
errno, absl::StrFormat("Cannot popen '%s'", invocation));
}
return ReadFromFilePointer(fp.get());
}
};
TEST_F(SubcommandTest, LogDefaultTest) {
std::string command = absl::StrFormat("%s %s", program_name, kLogVLog);
#if defined(PLATFORM_GOOGLE)
command += " --alsologtostderr";
#endif
command += " 2>&1";
TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str()));
EXPECT_THAT(out, HasSubstr("LOG INFO"));
EXPECT_THAT(out, HasSubstr("LOG WARNING"));
EXPECT_THAT(out, HasSubstr("LOG ERROR"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(1)? 0"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(2)? 0"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(3)? 0"));
}
TEST_F(SubcommandTest, MinLogLevelTest) {
std::string command = absl::StrFormat("%s %s", program_name, kLogVLog);
#if defined(PLATFORM_GOOGLE)
command += " --minloglevel=1 --alsologtostderr";
#elif defined(PLATFORM_WINDOWS)
command = absl::StrFormat("set TF_CPP_MIN_LOG_LEVEL=1 && %s", command);
#else
command = absl::StrFormat("TF_CPP_MIN_LOG_LEVEL=1 %s", command);
#endif
command += " 2>&1";
TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str()));
EXPECT_THAT(out, Not(HasSubstr("LOG INFO")));
EXPECT_THAT(out, HasSubstr("LOG WARNING"));
EXPECT_THAT(out, HasSubstr("LOG ERROR"));
}
TEST_F(SubcommandTest, VLogDefaultTest) {
std::string command = absl::StrFormat("%s %s", program_name, kLogVLog);
#if defined(PLATFORM_GOOGLE)
command += " --alsologtostderr";
#endif
command += " 2>&1";
TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str()));
EXPECT_THAT(out, Not(HasSubstr("VLevel 1")));
EXPECT_THAT(out, Not(HasSubstr("VLevel 2")));
EXPECT_THAT(out, Not(HasSubstr("VLevel 3")));
}
TEST_F(SubcommandTest, MaxVLogLevelTest) {
std::string command = absl::StrFormat("%s %s", program_name, kLogVLog);
#if defined(PLATFORM_GOOGLE)
command += " --v=2 --alsologtostderr";
#elif defined(PLATFORM_WINDOWS)
command = absl::StrFormat("set TF_CPP_MAX_VLOG_LEVEL=2 && %s", command);
#else
command = absl::StrFormat("TF_CPP_MAX_VLOG_LEVEL=2 %s", command);
#endif
command += " 2>&1";
TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str()));
EXPECT_THAT(out, HasSubstr("VLevel 1"));
EXPECT_THAT(out, HasSubstr("VLevel 2"));
EXPECT_THAT(out, Not(HasSubstr("VLevel 3")));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(1)? 1"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(2)? 1"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(3)? 0"));
}
TEST_F(SubcommandTest, VModuleTest) {
std::string command = absl::StrFormat("%s %s", program_name, kLogVLog);
#if defined(PLATFORM_GOOGLE)
command += " --vmodule=logging_test=2,shoobadooba=3 --alsologtostderr";
#elif defined(PLATFORM_WINDOWS)
command = absl::StrFormat(
"set TF_CPP_VMODULE=logging_test=2,shoobadooba=3 && %s", command);
#else
command = absl::StrFormat("TF_CPP_VMODULE=logging_test=2,shoobadooba=3 %s",
command);
#endif
command += " 2>&1";
TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str()));
EXPECT_THAT(out, HasSubstr("VLevel 1"));
EXPECT_THAT(out, HasSubstr("VLevel 2"));
EXPECT_THAT(out, Not(HasSubstr("VLevel 3")));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(1)? 1"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(2)? 1"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(3)? 0"));
}
TEST_F(SubcommandTest, VLogFilenameTest) {
#if defined(PLATFORM_GOOGLE)
constexpr bool kVLogFilenameEnvVarIsSupported = false;
#else
constexpr bool kVLogFilenameEnvVarIsSupported = true;
#endif
if (!kVLogFilenameEnvVarIsSupported) {
GTEST_SKIP() << "Not supported on this platform";
}
std::string command = absl::StrFormat("%s %s", program_name, kLogVLog);
std::string filename = io::GetTempFilename("logging_test");
#if defined(PLATFORM_WINDOWS)
command = absl::StrFormat(
"set TF_CPP_VLOG_FILENAME=%s && set TF_CPP_MAX_VLOG_LEVEL=1 && %s",
filename, command);
#else
command = absl::StrFormat(
"TF_CPP_VLOG_FILENAME=%s TF_CPP_MAX_VLOG_LEVEL=1 %s", filename, command);
#endif
command += " 2>&1";
TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str()));
EXPECT_THAT(out, Not(HasSubstr("LOG INFO")));
EXPECT_THAT(out, Not(HasSubstr("LOG WARNING")));
EXPECT_THAT(out, Not(HasSubstr("LOG ERROR")));
EXPECT_THAT(out, Not(HasSubstr("VLOG_IS_ON(1)?")));
EXPECT_THAT(out, Not(HasSubstr("VLOG_IS_ON(2)?")));
EXPECT_THAT(out, Not(HasSubstr("VLOG_IS_ON(3)?")));
EXPECT_THAT(out, Not(HasSubstr("VLevel 1")));
EXPECT_THAT(out, Not(HasSubstr("VLevel 2")));
EXPECT_THAT(out, Not(HasSubstr("VLevel 3")));
TF_ASSERT_OK_AND_ASSIGN(std::string log_file, ReadFromFile(filename));
EXPECT_THAT(log_file, HasSubstr("LOG INFO"));
EXPECT_THAT(log_file, HasSubstr("LOG WARNING"));
EXPECT_THAT(log_file, HasSubstr("LOG ERROR"));
EXPECT_THAT(log_file, HasSubstr("VLOG_IS_ON(1)"));
EXPECT_THAT(log_file, HasSubstr("VLOG_IS_ON(2)"));
EXPECT_THAT(log_file, HasSubstr("VLOG_IS_ON(3)"));
EXPECT_THAT(log_file, HasSubstr("VLevel 1"));
EXPECT_THAT(log_file, Not(HasSubstr("VLevel 2")));
EXPECT_THAT(log_file, Not(HasSubstr("VLevel 3")));
}
}
}
GTEST_API_ int main(int argc, char** argv) {
tsl::testing::InstallStacktraceHandler();
testing::InitGoogleTest(&argc, argv);
program_name = argv[0];
if (argc >= 2 && tsl::SubcommandTest::IsSubcommand(argv[1])) {
return tsl::SubcommandTest::Run(argv[1]);
}
return RUN_ALL_TESTS();
} |
1,245 | cpp | tensorflow/tensorflow | tf_status_helper | tensorflow/c/tf_status_helper.cc | tensorflow/c/tf_status_helper_test.cc | #ifndef TENSORFLOW_C_TF_STATUS_HELPER_H_
#define TENSORFLOW_C_TF_STATUS_HELPER_H_
#include <memory>
#include <utility>
#include "tensorflow/c/tf_status.h"
#include "tsl/platform/status.h"
namespace tsl {
void Set_TF_Status_from_Status(TF_Status* tf_status,
const absl::Status& status);
absl::Status StatusFromTF_Status(const TF_Status* tf_status);
}
namespace tensorflow {
using tsl::Set_TF_Status_from_Status;
using tsl::StatusFromTF_Status;
namespace internal {
struct TF_StatusDeleter {
void operator()(TF_Status* tf_status) const { TF_DeleteStatus(tf_status); }
};
}
using TF_StatusPtr = std::unique_ptr<TF_Status, internal::TF_StatusDeleter>;
}
#define TF_STATUS_ASSIGN_OR_RETURN(lhs, rexpr, c_status) \
_TF_STATUS_ASSIGN_OR_RETURN_IMPL( \
_TF_STATUS_CONCAT(_status_or_value, __COUNTER__), lhs, rexpr, c_status);
#define _TF_STATUS_ASSIGN_OR_RETURN_IMPL(statusor, lhs, rexpr, c_status) \
auto statusor = (rexpr); \
if (!statusor.ok()) { \
tensorflow::Set_TF_Status_from_Status(c_status, statusor.status()); \
return; \
} \
lhs = std::move(*statusor)
#define TF_STATUS_RETURN_IF_ERROR(rexpr, c_status) \
_TF_STATUS_RETURN_IF_ERROR_IMPL(_TF_STATUS_CONCAT(_status, __COUNTER__), \
rexpr, c_status);
#define _TF_STATUS_RETURN_IF_ERROR_IMPL(status, rexpr, c_status) \
auto status = (rexpr); \
if (!status.ok()) { \
tensorflow::Set_TF_Status_from_Status(c_status, status); \
return; \
}
#define _TF_STATUS_CONCAT(x, y) _TF_STATUS_CONCAT_IMPL(x, y)
#define _TF_STATUS_CONCAT_IMPL(x, y) x##y
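// A minimal usage sketch of the two macros above (hypothetical C-API shim;
// MakeTensor and Validate are illustrative names only):
//
//   void MyOp(TF_Status* c_status) {
//     TF_STATUS_ASSIGN_OR_RETURN(tensorflow::Tensor t, MakeTensor(),
//                                c_status);
//     TF_STATUS_RETURN_IF_ERROR(Validate(t), c_status);
//   }
//
// On failure the absl::Status is copied into c_status and the enclosing
// void function returns early; on success execution falls through.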
#endif
#include "tensorflow/c/tf_status_helper.h"
#include <string>
#include "tensorflow/c/tf_status.h"
#include "xla/tsl/c/tsl_status_helper.h"
namespace tsl {
void Set_TF_Status_from_Status(TF_Status* tf_status,
const absl::Status& status) {
TF_SetStatus(tf_status, TSLCodeFromStatusCode(status.code()),
absl::StatusMessageAsCStr(status));
status.ForEachPayload(
[tf_status](absl::string_view key, const absl::Cord& value) {
std::string key_str(key);
std::string value_str(value);
TF_SetPayload(tf_status, key_str.c_str(), value_str.c_str());
});
}
absl::Status StatusFromTF_Status(const TF_Status* tf_status) {
absl::Status status(StatusCodeFromTSLCode(TF_GetCode(tf_status)),
TF_Message(tf_status));
TF_ForEachPayload(
tf_status,
[](const char* key, const char* value, void* capture) {
absl::Status* status = static_cast<absl::Status*>(capture);
status->SetPayload(key, absl::Cord(absl::string_view(value)));
},
&status);
return status;
}
} | #include "tensorflow/c/tf_status_helper.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
TEST(StatusHelper, TestStatusHelper) {
TSL_Status* s = TSL_NewStatus();
absl::Status cc_status(absl::InvalidArgumentError("some error"));
cc_status.SetPayload("key1", absl::Cord("value1"));
cc_status.SetPayload("key2", absl::Cord("value2"));
Set_TF_Status_from_Status(s, cc_status);
ASSERT_EQ(TSL_INVALID_ARGUMENT, TSL_GetCode(s));
ASSERT_EQ(std::string("some error"), TSL_Message(s));
absl::Status another_cc_status(StatusFromTF_Status(s));
ASSERT_FALSE(another_cc_status.ok());
ASSERT_EQ(std::string("some error"), another_cc_status.message());
ASSERT_EQ(error::INVALID_ARGUMENT, another_cc_status.code());
ASSERT_EQ(cc_status.GetPayload("key1"), another_cc_status.GetPayload("key1"));
ASSERT_EQ(cc_status.GetPayload("key2"), another_cc_status.GetPayload("key2"));
TSL_DeleteStatus(s);
}
}
} |
1,246 | cpp | tensorflow/tensorflow | env | third_party/xla/third_party/tsl/tsl/platform/windows/env.cc | tensorflow/core/platform/env_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_ENV_H_
#define TENSORFLOW_TSL_PLATFORM_ENV_H_
#include <stdint.h>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "tsl/platform/env_time.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/numa.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/types.h"
#ifdef PLATFORM_WINDOWS
#undef CopyFile
#undef DeleteFile
#endif
namespace tsl {
class Thread;
struct ThreadOptions;
class Env {
public:
Env();
virtual ~Env() = default;
static Env* Default();
virtual absl::Status GetFileSystemForFile(const std::string& fname,
FileSystem** result);
virtual absl::Status GetRegisteredFileSystemSchemes(
std::vector<std::string>* schemes);
virtual absl::Status RegisterFileSystem(const std::string& scheme,
FileSystemRegistry::Factory factory);
virtual absl::Status RegisterFileSystem(
const std::string& scheme, std::unique_ptr<FileSystem> filesystem);
absl::Status SetOption(const std::string& scheme, const std::string& key,
const std::string& value);
absl::Status SetOption(const std::string& scheme, const std::string& key,
const std::vector<string>& values);
absl::Status SetOption(const std::string& scheme, const std::string& key,
const std::vector<int64_t>& values);
absl::Status SetOption(const std::string& scheme, const std::string& key,
const std::vector<double>& values);
absl::Status FlushFileSystemCaches();
absl::Status NewRandomAccessFile(const std::string& fname,
std::unique_ptr<RandomAccessFile>* result);
absl::Status NewRandomAccessFile(const std::string& fname,
TransactionToken* token,
std::unique_ptr<RandomAccessFile>* result) {
return absl::OkStatus();
}
absl::Status NewWritableFile(const std::string& fname,
std::unique_ptr<WritableFile>* result);
absl::Status NewWritableFile(const std::string& fname,
TransactionToken* token,
std::unique_ptr<WritableFile>* result) {
return absl::OkStatus();
}
absl::Status NewAppendableFile(const std::string& fname,
std::unique_ptr<WritableFile>* result);
absl::Status NewAppendableFile(const std::string& fname,
TransactionToken* token,
std::unique_ptr<WritableFile>* result) {
return absl::OkStatus();
}
absl::Status NewReadOnlyMemoryRegionFromFile(
const std::string& fname, std::unique_ptr<ReadOnlyMemoryRegion>* result);
absl::Status NewReadOnlyMemoryRegionFromFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<ReadOnlyMemoryRegion>* result) {
return absl::OkStatus();
}
absl::Status FileExists(const std::string& fname);
absl::Status FileExists(const std::string& fname, TransactionToken* token) {
return absl::OkStatus();
}
bool FilesExist(const std::vector<string>& files,
std::vector<absl::Status>* status);
bool FilesExist(const std::vector<string>& files, TransactionToken* token,
std::vector<absl::Status>* status) {
return true;
}
absl::Status GetChildren(const std::string& dir, std::vector<string>* result);
absl::Status GetChildren(const std::string& dir, TransactionToken* token,
std::vector<string>* result) {
return absl::OkStatus();
}
virtual bool MatchPath(const std::string& path,
const std::string& pattern) = 0;
virtual absl::Status GetMatchingPaths(const std::string& pattern,
std::vector<string>* results);
absl::Status GetMatchingPaths(const std::string& pattern,
TransactionToken* token,
std::vector<string>* results) {
return absl::OkStatus();
}
absl::Status DeleteFile(const std::string& fname);
absl::Status DeleteFile(const std::string& fname, TransactionToken* token) {
return absl::OkStatus();
}
absl::Status DeleteRecursively(const std::string& dirname,
int64_t* undeleted_files,
int64_t* undeleted_dirs);
absl::Status DeleteRecursively(const std::string& dirname,
TransactionToken* token,
int64_t* undeleted_files,
int64_t* undeleted_dirs) {
return absl::OkStatus();
}
absl::Status RecursivelyCreateDir(const std::string& dirname);
absl::Status RecursivelyCreateDir(const std::string& dirname,
TransactionToken* token) {
return absl::OkStatus();
}
absl::Status CreateDir(const std::string& dirname);
absl::Status CreateDir(const std::string& dirname, TransactionToken* token) {
return absl::OkStatus();
}
absl::Status DeleteDir(const std::string& dirname);
absl::Status DeleteDir(const std::string& dirname, TransactionToken* token) {
return absl::OkStatus();
}
absl::Status Stat(const std::string& fname, FileStatistics* stat);
absl::Status Stat(const std::string& fname, TransactionToken* token,
FileStatistics* stat) {
return absl::OkStatus();
}
absl::Status IsDirectory(const std::string& fname);
absl::Status HasAtomicMove(const std::string& path, bool* has_atomic_move);
absl::Status CanCreateTempFile(const std::string& fname,
bool* can_create_temp_file);
absl::Status GetFileSize(const std::string& fname, uint64* file_size);
absl::Status GetFileSize(const std::string& fname, TransactionToken* token,
uint64* file_size) {
return absl::OkStatus();
}
absl::Status RenameFile(const std::string& src, const std::string& target);
absl::Status RenameFile(const std::string& src, const std::string& target,
TransactionToken* token) {
return absl::OkStatus();
}
absl::Status CopyFile(const std::string& src, const std::string& target);
absl::Status CopyFile(const std::string& src, const std::string& target,
TransactionToken* token) {
return absl::OkStatus();
}
absl::Status StartTransaction(const std::string& filename,
TransactionToken** token) {
*token = nullptr;
return absl::OkStatus();
}
absl::Status AddToTransaction(const std::string& path,
TransactionToken* token) {
return absl::OkStatus();
}
absl::Status GetTokenOrStartTransaction(const std::string& path,
TransactionToken** token) {
*token = nullptr;
return absl::OkStatus();
}
absl::Status GetTransactionForPath(const std::string& path,
TransactionToken** token) {
*token = nullptr;
return absl::OkStatus();
}
absl::Status EndTransaction(TransactionToken* token) {
return absl::OkStatus();
}
std::string GetExecutablePath();
bool LocalTempFilename(std::string* filename);
bool CreateUniqueFileName(std::string* prefix, const std::string& suffix);
virtual std::string GetRunfilesDir() = 0;
virtual uint64 NowNanos() const { return EnvTime::NowNanos(); }
virtual uint64 NowMicros() const { return EnvTime::NowMicros(); }
virtual uint64 NowSeconds() const { return EnvTime::NowSeconds(); }
virtual void SleepForMicroseconds(int64_t micros) = 0;
int32 GetProcessId();
virtual Thread* StartThread(
const ThreadOptions& thread_options, const std::string& name,
absl::AnyInvocable<void()> fn) TF_MUST_USE_RESULT = 0;
virtual int32 GetCurrentThreadId() = 0;
virtual bool GetCurrentThreadName(std::string* name) = 0;
virtual void SchedClosure(absl::AnyInvocable<void()> closure) = 0;
virtual void SchedClosureAfter(int64_t micros,
absl::AnyInvocable<void()> closure) = 0;
virtual absl::Status LoadDynamicLibrary(const char* library_filename,
void** handle) = 0;
virtual absl::Status GetSymbolFromLibrary(void* handle,
const char* symbol_name,
void** symbol) = 0;
virtual std::string FormatLibraryFileName(const std::string& name,
const std::string& version) = 0;
virtual void GetLocalTempDirectories(std::vector<string>* list) = 0;
private:
std::unique_ptr<FileSystemRegistry> file_system_registry_;
Env(const Env&) = delete;
void operator=(const Env&) = delete;
};
class EnvWrapper : public Env {
public:
explicit EnvWrapper(Env* t) : target_(t) {}
~EnvWrapper() override;
Env* target() const { return target_; }
absl::Status GetFileSystemForFile(const std::string& fname,
FileSystem** result) override {
return target_->GetFileSystemForFile(fname, result);
}
absl::Status GetRegisteredFileSystemSchemes(
std::vector<string>* schemes) override {
return target_->GetRegisteredFileSystemSchemes(schemes);
}
absl::Status RegisterFileSystem(
const std::string& scheme, FileSystemRegistry::Factory factory) override {
return target_->RegisterFileSystem(scheme, factory);
}
bool MatchPath(const std::string& path, const std::string& pattern) override {
return target_->MatchPath(path, pattern);
}
uint64 NowMicros() const override { return target_->NowMicros(); }
void SleepForMicroseconds(int64_t micros) override {
target_->SleepForMicroseconds(micros);
}
Thread* StartThread(const ThreadOptions& thread_options,
const std::string& name,
absl::AnyInvocable<void()> fn) override {
return target_->StartThread(thread_options, name, std::move(fn));
}
int32 GetCurrentThreadId() override { return target_->GetCurrentThreadId(); }
bool GetCurrentThreadName(std::string* name) override {
return target_->GetCurrentThreadName(name);
}
void SchedClosure(absl::AnyInvocable<void()> closure) override {
target_->SchedClosure(std::move(closure));
}
void SchedClosureAfter(int64_t micros,
absl::AnyInvocable<void()> closure) override {
target_->SchedClosureAfter(micros, std::move(closure));
}
absl::Status LoadDynamicLibrary(const char* library_filename,
void** handle) override {
return target_->LoadDynamicLibrary(library_filename, handle);
}
absl::Status GetSymbolFromLibrary(void* handle, const char* symbol_name,
void** symbol) override {
return target_->GetSymbolFromLibrary(handle, symbol_name, symbol);
}
std::string FormatLibraryFileName(const std::string& name,
const std::string& version) override {
return target_->FormatLibraryFileName(name, version);
}
std::string GetRunfilesDir() override { return target_->GetRunfilesDir(); }
private:
void GetLocalTempDirectories(std::vector<string>* list) override {
target_->GetLocalTempDirectories(list);
}
Env* target_;
};
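// EnvWrapper is the usual decorator hook: subclass it, override only what
// you need, and let everything else forward to target(). A hypothetical
// sketch that virtualizes time for tests:
//
//   class FakeClockEnv : public EnvWrapper {
//    public:
//     explicit FakeClockEnv(Env* base) : EnvWrapper(base) {}
//     uint64 NowMicros() const override { return now_micros_; }
//     void AdvanceMicros(uint64 d) { now_micros_ += d; }
//    private:
//     uint64 now_micros_ = 0;
//   };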
class Thread {
public:
Thread() {}
virtual ~Thread();
private:
Thread(const Thread&) = delete;
void operator=(const Thread&) = delete;
};
int setenv(const char* name, const char* value, int overwrite);
int unsetenv(const char* name);
struct ThreadOptions {
size_t stack_size = 0;
size_t guard_size = 0;
int numa_node = port::kNUMANoAffinity;
};
absl::Status FileSystemCopyFile(FileSystem* src_fs, const std::string& src,
FileSystem* target_fs,
const std::string& target);
absl::Status ReadFileToString(Env* env, const std::string& fname,
std::string* data);
absl::Status WriteStringToFile(Env* env, const std::string& fname,
const StringPiece& data);
absl::Status WriteBinaryProto(Env* env, const std::string& fname,
const protobuf::MessageLite& proto);
absl::Status ReadBinaryProto(Env* env, const std::string& fname,
protobuf::MessageLite* proto);
inline absl::Status WriteTextProto(Env* ,
const std::string& ,
const protobuf::MessageLite& ) {
return errors::Unimplemented("Can't write text protos with protolite.");
}
absl::Status WriteTextProto(Env* env, const std::string& fname,
const protobuf::Message& proto);
inline absl::Status ReadTextProto(Env* ,
const std::string& ,
protobuf::MessageLite* ) {
return errors::Unimplemented("Can't parse text protos with protolite.");
}
absl::Status ReadTextProto(Env* env, const std::string& fname,
protobuf::Message* proto);
absl::Status ReadTextOrBinaryProto(Env* env, const std::string& fname,
protobuf::Message* proto);
absl::Status ReadTextOrBinaryProto(Env* env, const std::string& fname,
protobuf::MessageLite* proto);
namespace register_file_system {
template <typename Factory>
struct Register {
Register(Env* env, const std::string& scheme, bool try_modular_filesystems) {
if (try_modular_filesystems) {
const char* env_value = getenv("TF_USE_MODULAR_FILESYSTEM");
string load_plugin = env_value ? absl::AsciiStrToLower(env_value) : "";
if (load_plugin == "true" || load_plugin == "1") {
LOG(WARNING) << "Using modular file system for '" << scheme << "'."
<< " Please switch to tensorflow-io"
<< " (https:
<< " support of '" << scheme << "'.";
return;
}
}
env->RegisterFileSystem(scheme, []() -> FileSystem* { return new Factory; })
.IgnoreError();
}
};
}
}
#define REGISTER_FILE_SYSTEM_ENV(env, scheme, factory, modular) \
REGISTER_FILE_SYSTEM_UNIQ_HELPER(__COUNTER__, env, scheme, factory, modular)
#define REGISTER_FILE_SYSTEM_UNIQ_HELPER(ctr, env, scheme, factory, modular) \
REGISTER_FILE_SYSTEM_UNIQ(ctr, env, scheme, factory, modular)
#define REGISTER_FILE_SYSTEM_UNIQ(ctr, env, scheme, factory, modular) \
static ::tsl::register_file_system::Register<factory> register_ff##ctr \
TF_ATTRIBUTE_UNUSED = \
::tsl::register_file_system::Register<factory>(env, scheme, modular)
#define REGISTER_FILE_SYSTEM(scheme, factory) \
REGISTER_FILE_SYSTEM_ENV(::tsl::Env::Default(), scheme, factory, false);
#define REGISTER_LEGACY_FILE_SYSTEM(scheme, factory) \
REGISTER_FILE_SYSTEM_ENV(::tsl::Env::Default(), scheme, factory, true);
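// Hypothetical usage of the macro above, registering a scheme for the
// in-memory filesystem from ram_file_system.h:
//   REGISTER_FILE_SYSTEM("ram", RamFileSystem);
// Afterwards Env::Default()->GetFileSystemForFile("ram://buf", &fs) would
// resolve to a RamFileSystem instance.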
#endif
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <fnmatch.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#ifdef __FreeBSD__
#include <pthread_np.h>
#endif
#include <map>
#include <thread>
#include <vector>
#include "tsl/platform/default/posix_file_system.h"
#include "tsl/platform/env.h"
#include "tsl/platform/load_library.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/ram_file_system.h"
#include "tsl/platform/strcat.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace {
mutex name_mutex(tsl::LINKER_INITIALIZED);
std::map<std::thread::id, string>& GetThreadNameRegistry()
TF_EXCLUSIVE_LOCKS_REQUIRED(name_mutex) {
static auto* thread_name_registry = new std::map<std::thread::id, string>();
return *thread_name_registry;
}
class PThread : public Thread {
public:
PThread(const ThreadOptions& thread_options, const std::string& name,
absl::AnyInvocable<void()> fn) {
ThreadParams* params = new ThreadParams;
params->name = name;
params->fn = std::move(fn);
pthread_attr_t attributes;
pthread_attr_init(&attributes);
if (thread_options.stack_size != 0) {
pthread_attr_setstacksize(&attributes, thread_options.stack_size);
}
int ret = pthread_create(&thread_, &attributes, &ThreadFn, para | #include "tensorflow/core/platform/env.h"
#include <sys/stat.h>
#include <memory>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/cord.h"
#include "tensorflow/core/platform/null_file_system.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/lib/core/status_test_util.h"
namespace tsl {
namespace {
string CreateTestFile(Env* env, const string& filename, int length) {
string input(length, 0);
for (int i = 0; i < length; i++) input[i] = i;
TF_EXPECT_OK(WriteStringToFile(env, filename, input));
return input;
}
tensorflow::GraphDef CreateTestProto() {
tensorflow::GraphDef g;
tensorflow::NodeDef* node = g.add_node();
node->set_name("name1");
node->set_op("op1");
node = g.add_node();
node->set_name("name2");
node->set_op("op2");
return g;
}
static void ExpectHasSubstr(StringPiece s, StringPiece expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
}
string BaseDir() { return io::JoinPath(testing::TmpDir(), "base_dir"); }
class DefaultEnvTest : public ::testing::Test {
protected:
void SetUp() override { TF_CHECK_OK(env_->CreateDir(BaseDir())); }
void TearDown() override {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(
env_->DeleteRecursively(BaseDir(), &undeleted_files, &undeleted_dirs));
}
Env* env_ = Env::Default();
};
TEST_F(DefaultEnvTest, IncompleteReadOutOfRange) {
const string filename = io::JoinPath(BaseDir(), "out_of_range");
const string input = CreateTestFile(env_, filename, 2);
std::unique_ptr<RandomAccessFile> f;
TF_EXPECT_OK(env_->NewRandomAccessFile(filename, &f));
StringPiece result;
char scratch[3];
EXPECT_EQ(error::OUT_OF_RANGE, f->Read(0, 3, &result, scratch).code());
EXPECT_EQ(input, result);
TF_EXPECT_OK(f->Read(0, 2, &result, scratch));
EXPECT_EQ(input, result);
}
TEST_F(DefaultEnvTest, ReadFileToString) {
for (const int length : {0, 1, 1212, 2553, 4928, 8196, 9000, (1 << 20) - 1,
1 << 20, (1 << 20) + 1, (256 << 20) + 100}) {
const string filename =
io::JoinPath(BaseDir(), "bar", "..", strings::StrCat("file", length));
const string input = CreateTestFile(env_, filename, length);
string output;
TF_EXPECT_OK(ReadFileToString(env_, filename, &output));
EXPECT_EQ(length, output.size());
EXPECT_EQ(input, output);
FileStatistics stat;
TF_EXPECT_OK(env_->Stat(filename, &stat));
EXPECT_EQ(length, stat.length);
EXPECT_FALSE(stat.is_directory);
}
}
TEST_F(DefaultEnvTest, ReadWriteBinaryProto) {
const tensorflow::GraphDef proto = CreateTestProto();
const string filename = strings::StrCat(BaseDir(), "binary_proto");
TF_EXPECT_OK(WriteBinaryProto(env_, filename, proto));
tensorflow::GraphDef result;
TF_EXPECT_OK(ReadBinaryProto(env_, filename, &result));
EXPECT_EQ(result.DebugString(), proto.DebugString());
tensorflow::GraphDef result2;
TF_EXPECT_OK(ReadTextOrBinaryProto(env_, filename, &result2));
EXPECT_EQ(result2.DebugString(), proto.DebugString());
}
TEST_F(DefaultEnvTest, ReadWriteTextProto) {
const tensorflow::GraphDef proto = CreateTestProto();
const string filename = strings::StrCat(BaseDir(), "text_proto");
string as_text;
EXPECT_TRUE(protobuf::TextFormat::PrintToString(proto, &as_text));
TF_EXPECT_OK(WriteStringToFile(env_, filename, as_text));
tensorflow::GraphDef result;
TF_EXPECT_OK(ReadTextProto(env_, filename, &result));
EXPECT_EQ(result.DebugString(), proto.DebugString());
tensorflow::GraphDef result2;
TF_EXPECT_OK(ReadTextOrBinaryProto(env_, filename, &result2));
EXPECT_EQ(result2.DebugString(), proto.DebugString());
}
TEST_F(DefaultEnvTest, FileToReadonlyMemoryRegion) {
for (const int length : {1, 1212, 2553, 4928, 8196, 9000, (1 << 20) - 1,
1 << 20, (1 << 20) + 1}) {
const string filename =
io::JoinPath(BaseDir(), strings::StrCat("file", length));
const string input = CreateTestFile(env_, filename, length);
std::unique_ptr<ReadOnlyMemoryRegion> region;
TF_EXPECT_OK(env_->NewReadOnlyMemoryRegionFromFile(filename, ®ion));
ASSERT_NE(region, nullptr);
EXPECT_EQ(length, region->length());
EXPECT_EQ(input, string(reinterpret_cast<const char*>(region->data()),
region->length()));
FileStatistics stat;
TF_EXPECT_OK(env_->Stat(filename, &stat));
EXPECT_EQ(length, stat.length);
EXPECT_FALSE(stat.is_directory);
}
}
TEST_F(DefaultEnvTest, DeleteRecursively) {
const string parent_dir = io::JoinPath(BaseDir(), "root_dir");
const string child_dir1 = io::JoinPath(parent_dir, "child_dir1");
const string child_dir2 = io::JoinPath(parent_dir, "child_dir2");
TF_EXPECT_OK(env_->CreateDir(parent_dir));
const string root_file1 = io::JoinPath(parent_dir, "root_file1");
const string root_file2 = io::JoinPath(parent_dir, "root_file2");
const string root_file3 = io::JoinPath(parent_dir, ".root_file3");
CreateTestFile(env_, root_file1, 100);
CreateTestFile(env_, root_file2, 100);
CreateTestFile(env_, root_file3, 100);
TF_EXPECT_OK(env_->CreateDir(child_dir1));
const string child1_file1 = io::JoinPath(child_dir1, "child1_file1");
CreateTestFile(env_, child1_file1, 100);
TF_EXPECT_OK(env_->CreateDir(child_dir2));
int64_t undeleted_files, undeleted_dirs;
TF_EXPECT_OK(
env_->DeleteRecursively(parent_dir, &undeleted_files, &undeleted_dirs));
EXPECT_EQ(0, undeleted_files);
EXPECT_EQ(0, undeleted_dirs);
EXPECT_EQ(error::Code::NOT_FOUND, env_->FileExists(root_file1).code());
EXPECT_EQ(error::Code::NOT_FOUND, env_->FileExists(root_file2).code());
EXPECT_EQ(error::Code::NOT_FOUND, env_->FileExists(root_file3).code());
EXPECT_EQ(error::Code::NOT_FOUND, env_->FileExists(child1_file1).code());
}
TEST_F(DefaultEnvTest, DeleteRecursivelyFail) {
const string parent_dir = io::JoinPath(BaseDir(), "root_dir");
int64_t undeleted_files, undeleted_dirs;
absl::Status s =
env_->DeleteRecursively(parent_dir, &undeleted_files, &undeleted_dirs);
EXPECT_EQ(error::Code::NOT_FOUND, s.code());
EXPECT_EQ(0, undeleted_files);
EXPECT_EQ(1, undeleted_dirs);
}
TEST_F(DefaultEnvTest, RecursivelyCreateDir) {
const string create_path = io::JoinPath(BaseDir(), "a", "b", "c", "d");
TF_CHECK_OK(env_->RecursivelyCreateDir(create_path));
TF_CHECK_OK(env_->RecursivelyCreateDir(create_path));
TF_EXPECT_OK(env_->FileExists(create_path));
}
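// RecursivelyCreateDir is exercised twice on the same path above (and in the
// tests below) to verify that the call is idempotent: creating an already
// existing directory chain must succeed rather than fail with ALREADY_EXISTS.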
TEST_F(DefaultEnvTest, RecursivelyCreateDirEmpty) {
TF_CHECK_OK(env_->RecursivelyCreateDir(""));
}
TEST_F(DefaultEnvTest, RecursivelyCreateDirSubdirsExist) {
const string subdir_path = io::JoinPath(BaseDir(), "a", "b");
TF_CHECK_OK(env_->CreateDir(io::JoinPath(BaseDir(), "a")));
TF_CHECK_OK(env_->CreateDir(subdir_path));
TF_EXPECT_OK(env_->FileExists(subdir_path));
const string create_path = io::JoinPath(BaseDir(), "a", "b", "c", "d");
TF_CHECK_OK(env_->RecursivelyCreateDir(create_path));
TF_CHECK_OK(env_->RecursivelyCreateDir(create_path));
TF_EXPECT_OK(env_->FileExists(create_path));
TF_EXPECT_OK(env_->FileExists(io::JoinPath(BaseDir(), "a", "b", "c")));
}
TEST_F(DefaultEnvTest, LocalFileSystem) {
int expected_num_files = 0;
std::vector<string> matching_paths;
for (const int length : {0, 1, 1212, 2553, 4928, 8196, 9000, (1 << 20) - 1,
1 << 20, (1 << 20) + 1}) {
string filename = io::JoinPath(BaseDir(), strings::StrCat("len", length));
filename = strings::StrCat("file:
const string input = CreateTestFile(env_, filename, length);
++expected_num_files;
TF_EXPECT_OK(env_->GetMatchingPaths(
strings::StrCat("file:
&matching_paths));
EXPECT_EQ(expected_num_files, matching_paths.size());
TF_EXPECT_OK(env_->GetMatchingPaths(
io::JoinPath(BaseDir(), "l*"), &matching_paths));
EXPECT_EQ(expected_num_files, matching_paths.size());
string output;
TF_EXPECT_OK(ReadFileToString(env_, filename, &output));
EXPECT_EQ(length, output.size());
EXPECT_EQ(input, output);
FileStatistics stat;
TF_EXPECT_OK(env_->Stat(filename, &stat));
EXPECT_EQ(length, stat.length);
EXPECT_FALSE(stat.is_directory);
}
}
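// This test drives the local filesystem through explicit "file://" URIs while
// also matching with a plain, scheme-less glob, so both addressing styles are
// verified to resolve to the same set of files.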
TEST_F(DefaultEnvTest, SleepForMicroseconds) {
const int64_t start = env_->NowMicros();
const int64_t sleep_time = 1e6 + 5e5;
env_->SleepForMicroseconds(sleep_time);
const int64_t delta = env_->NowMicros() - start;
EXPECT_GE(delta, sleep_time - 200);
}
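// Only a lower bound (with ~200us of slack for timer granularity) is checked:
// SleepForMicroseconds may legitimately oversleep under scheduler load, so an
// upper bound would make the test flaky.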
class TmpDirFileSystem : public NullFileSystem {
public:
TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT;
absl::Status FileExists(const string& dir, TransactionToken* token) override {
StringPiece scheme, host, path;
io::ParseURI(dir, &scheme, &host, &path);
if (path.empty()) return errors::NotFound(dir, " not found");
if (path == "/flushed") {
if (flushed_) {
return absl::OkStatus();
} else {
return errors::NotFound("FlushCaches() not called yet");
}
}
return Env::Default()->FileExists(io::JoinPath(BaseDir(), path));
}
absl::Status CreateDir(const string& dir, TransactionToken* token) override {
StringPiece scheme, host, path;
io::ParseURI(dir, &scheme, &host, &path);
if (scheme != "tmpdirfs") {
return errors::FailedPrecondition("scheme must be tmpdirfs");
}
if (host != "testhost") {
return errors::FailedPrecondition("host must be testhost");
}
absl::Status status =
Env::Default()->CreateDir(io::JoinPath(BaseDir(), path));
if (status.ok()) {
created_directories_.push_back(std::string(path));
}
return status;
}
absl::Status IsDirectory(const string& dir,
TransactionToken* token) override {
StringPiece scheme, host, path;
io::ParseURI(dir, &scheme, &host, &path);
for (const auto& existing_dir : created_directories_)
if (existing_dir == path) return absl::OkStatus();
return errors::NotFound(dir, " not found");
}
void FlushCaches(TransactionToken* token) override { flushed_ = true; }
private:
bool flushed_ = false;
std::vector<std::string> created_directories_ = {"/"};
};
REGISTER_FILE_SYSTEM("tmpdirfs", TmpDirFileSystem);
TEST_F(DefaultEnvTest, FlushFileSystemCaches) {
Env* env = Env::Default();
const string flushed =
strings::StrCat("tmpdirfs:
EXPECT_EQ(error::Code::NOT_FOUND, env->FileExists(flushed).code());
TF_EXPECT_OK(env->FlushFileSystemCaches());
TF_EXPECT_OK(env->FileExists(flushed));
}
TEST_F(DefaultEnvTest, RecursivelyCreateDirWithUri) {
Env* env = Env::Default();
const string create_path = strings::StrCat(
"tmpdirfs:
EXPECT_EQ(error::Code::NOT_FOUND, env->FileExists(create_path).code());
TF_CHECK_OK(env->RecursivelyCreateDir(create_path));
TF_CHECK_OK(env->RecursivelyCreateDir(create_path));
TF_EXPECT_OK(env->FileExists(create_path));
}
TEST_F(DefaultEnvTest, GetExecutablePath) {
Env* env = Env::Default();
TF_EXPECT_OK(env->FileExists(env->GetExecutablePath()));
}
TEST_F(DefaultEnvTest, LocalTempFilename) {
Env* env = Env::Default();
string filename;
EXPECT_TRUE(env->LocalTempFilename(&filename));
EXPECT_FALSE(env->FileExists(filename).ok());
std::unique_ptr<WritableFile> file_to_write;
TF_CHECK_OK(env->NewWritableFile(filename, &file_to_write));
#if defined(PLATFORM_GOOGLE)
TF_CHECK_OK(file_to_write->Append("Nu"));
TF_CHECK_OK(file_to_write->Append(absl::Cord("ll")));
#else
TF_CHECK_OK(file_to_write->Append("Null"));
#endif
TF_CHECK_OK(file_to_write->Close());
TF_CHECK_OK(env->FileExists(filename));
std::unique_ptr<WritableFile> file_to_append;
TF_CHECK_OK(env->NewAppendableFile(filename, &file_to_append));
int64_t pos;
TF_CHECK_OK(file_to_append->Tell(&pos));
ASSERT_EQ(4, pos);
std::unique_ptr<RandomAccessFile> file_to_read;
TF_CHECK_OK(env->NewRandomAccessFile(filename, &file_to_read));
StringPiece content;
char scratch[1024];
CHECK_EQ(
error::OUT_OF_RANGE,
file_to_read->Read(0, 1024, &content, scratch).code());
EXPECT_EQ("Null", content);
TF_CHECK_OK(env->DeleteFile(filename));
EXPECT_FALSE(env->FileExists(filename).ok());
}
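// The two Append branches above write the same four bytes ("Nu" plus "ll" via
// absl::Cord on the google platform, "Null" elsewhere), which is what makes
// Tell() == 4 and the short read returning "Null" with OUT_OF_RANGE hold on
// both build configurations.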
TEST_F(DefaultEnvTest, CreateUniqueFileName) {
Env* env = Env::Default();
string prefix = "tempfile-prefix-";
string suffix = ".tmp";
string filename = prefix;
EXPECT_TRUE(env->CreateUniqueFileName(&filename, suffix));
EXPECT_TRUE(absl::StartsWith(filename, prefix));
EXPECT_TRUE(str_util::EndsWith(filename, suffix));
}
TEST_F(DefaultEnvTest, GetProcessId) {
Env* env = Env::Default();
EXPECT_NE(env->GetProcessId(), 0);
}
TEST_F(DefaultEnvTest, GetThreadInformation) {
Env* env = Env::Default();
#if !defined(__APPLE__)
EXPECT_NE(env->GetCurrentThreadId(), 0);
#endif
string thread_name;
bool res = env->GetCurrentThreadName(&thread_name);
#if defined(PLATFORM_WINDOWS) || defined(__ANDROID__)
EXPECT_FALSE(res);
#elif !defined(__APPLE__)
EXPECT_TRUE(res);
EXPECT_GT(thread_name.size(), 0);
#endif
}
TEST_F(DefaultEnvTest, GetChildThreadInformation) {
Env* env = Env::Default();
Thread* child_thread = env->StartThread({}, "tf_child_thread", [env]() {
#if !defined(__APPLE__)
EXPECT_NE(env->GetCurrentThreadId(), 0);
#endif
string thread_name;
bool res = env->GetCurrentThreadName(&thread_name);
EXPECT_TRUE(res);
ExpectHasSubstr(thread_name, "tf_child_thread");
});
delete child_thread;
}
} |
1,247 | cpp | tensorflow/tensorflow | kernels | tensorflow/core/kernels/sparse/kernels.cc | tensorflow/core/kernels/sparse/kernels_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_SPARSE_KERNELS_H_
#define TENSORFLOW_CORE_KERNELS_SPARSE_KERNELS_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/kernels/sparse/sparse_matrix.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace functor {
template <typename Device>
struct CalculateNNZPerBatchMatrixFromIndices {
Status operator()(OpKernelContext* c, TTypes<int64_t>::ConstMatrix indices,
TTypes<int32>::Vec nnz_per_batch);
};
template <typename Device>
struct SparseTensorToCOOSparseMatrix {
void operator()(const Device& d, TTypes<int64_t>::ConstVec host_dense_shape,
TTypes<int64_t>::ConstMatrix indices,
TTypes<int32>::Vec coo_row_ind,
TTypes<int32>::Vec coo_col_ind);
};
template <typename Device>
struct COOSparseMatrixToSparseTensor {
Status operator()(OpKernelContext* c,
TTypes<int64_t>::ConstVec host_dense_shape,
TTypes<int32>::ConstVec host_batch_ptrs,
TTypes<int32>::Vec coo_row_ind,
TTypes<int32>::ConstVec coo_col_ind,
TTypes<int64_t>::Matrix indices);
};
template <typename Device>
struct COOSparseMatrixToCSRSparseMatrix {
Status operator()(OpKernelContext* c, const int rows, const int cols,
TTypes<int32>::UnalignedVec coo_row_ind,
TTypes<int32>::UnalignedVec csr_row_ptr);
};
struct SparseTensorToCSRSparseMatrixCPUFunctor {
Status operator()(int64_t batch_size, int num_rows, int num_cols,
TTypes<int64_t>::ConstMatrix indices,
TTypes<int32>::Vec batch_ptr,
TTypes<int32>::Vec csr_row_ptr,
TTypes<int32>::Vec csr_col_ind);
};
template <typename Device>
struct CSRSparseMatrixToCOOSparseMatrix {
Status operator()(OpKernelContext* c,
TTypes<int32>::UnalignedConstVec csr_row_ptr,
TTypes<int32>::UnalignedVec coo_row_ind);
};
template <typename Device, typename T>
struct CSRSparseMatrixMatMul {
explicit CSRSparseMatrixMatMul(const bool transpose_output);
Status Compute(OpKernelContext* ctx, const ConstCSRComponent<T>& a,
typename TTypes<T>::ConstMatrix b,
typename TTypes<T>::Matrix c);
};
template <typename Device, typename T>
class CSRSparseMatrixMatVec {
CSRSparseMatrixMatVec(bool transpose_a, bool adjoint_a);
Status Compute(OpKernelContext* ctx, const ConstCSRComponent<T>& a,
const T* x, T* y);
};
template <typename Device, typename T>
struct CSRStructureModifyingFunctor {
virtual ~CSRStructureModifyingFunctor() {}
virtual Status Initialize() = 0;
virtual Status GetWorkspaceSize(const ConstCSRComponent<T>& a,
const ConstCSRComponent<T>& b,
size_t* bufferSize) = 0;
virtual Status GetOutputStructure(const ConstCSRComponent<T>& a,
const ConstCSRComponent<T>& b,
TTypes<int32>::UnalignedVec c_row_ptr,
int* output_nnz, void* workspace) = 0;
virtual Status Compute(const ConstCSRComponent<T>& a,
const ConstCSRComponent<T>& b, CSRComponent<T>* c,
void* workspace) = 0;
};
template <typename Device, typename T>
struct CSRSparseMatrixAdd : public CSRStructureModifyingFunctor<Device, T> {
explicit CSRSparseMatrixAdd(OpKernelContext* ctx, const T alpha,
const T beta);
};
template <typename Device, typename T>
struct CSRSparseSparseMatrixMatMul
: public CSRStructureModifyingFunctor<Device, T> {
explicit CSRSparseSparseMatrixMatMul(OpKernelContext* ctx, bool transpose_a,
bool transpose_b);
};
template <typename Device, typename T>
struct CSRSparseMatrixTransposeComponent {
Status operator()(OpKernelContext* ctx, const ConstCSRComponent<T>& x,
CSRComponent<T>* y);
};
template <typename Device, typename T>
struct CSRSparseMatrixTranspose {
Status operator()(OpKernelContext* ctx, bool conjugate,
const CSRSparseMatrix& input_matrix,
CSRSparseMatrix* output_matrix);
};
template <typename Device, typename T>
struct CSRSparseMatrixSoftmax {
Status operator()(OpKernelContext* ctx, const CSRSparseMatrix& logits,
typename TTypes<T>::Vec softmax_values);
};
template <typename Device, typename T>
struct CSRSparseMatrixSoftmaxGrad {
Status operator()(OpKernelContext* ctx, const CSRSparseMatrix& softmax,
const CSRSparseMatrix& grad_softmax,
typename TTypes<T>::Vec gradient_values);
};
template <typename Device, typename T>
class CSRSparseMatrixMulScalar {
public:
explicit CSRSparseMatrixMulScalar() {}
Status Compute(OpKernelContext* ctx, const CSRSparseMatrix& a,
typename TTypes<T>::ConstScalar b, CSRSparseMatrix* c);
};
template <typename Device, typename T>
class CSRSparseMatrixBatchMulVec {
public:
explicit CSRSparseMatrixBatchMulVec() {}
Status Compute(OpKernelContext* ctx, const CSRSparseMatrix& a,
typename TTypes<T>::ConstFlat b, CSRSparseMatrix* c);
};
}
}
#endif
#include "tensorflow/core/kernels/sparse/kernels.h"
#include <numeric>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace functor {
Status SparseTensorToCSRSparseMatrixCPUFunctor::operator()(
int64_t batch_size, int num_rows, int num_cols,
TTypes<int64_t>::ConstMatrix indices, TTypes<int32>::Vec batch_ptr,
TTypes<int32>::Vec csr_row_ptr, TTypes<int32>::Vec csr_col_ind) {
if (batch_ptr.size() != batch_size + 1) {
return errors::InvalidArgument(
"Expected batch_ptr.size() == batch_size + 1. Got: ", batch_ptr.size(),
" vs. ", batch_size + 1);
}
if (csr_row_ptr.size() != batch_size * (num_rows + 1)) {
return errors::InvalidArgument(
"Expected csr_row_ptr.size() == batch_size * (num_rows + 1). Got: ",
csr_row_ptr.size(), " vs. ", batch_size * (num_rows + 1));
}
const int64_t total_nnz = indices.dimension(0);
const int rank = indices.dimension(1);
if (rank == 2 && batch_size != 1) {
return errors::InvalidArgument(
"Expected batch_size == 1 when rank is 2. Got batch_size: ",
batch_size);
}
if (rank < 2 || rank > 3) {
return errors::InvalidArgument(
"Indices must have either 2 or 3 columns. Got size ",
indices.dimensions());
}
if (csr_col_ind.size() != total_nnz) {
return errors::InvalidArgument(
"Expected csr_col_ind.size() == total_nnz. Got: ", csr_col_ind.size(),
" vs. ", total_nnz);
}
int prev_batch = -1;
if (rank == 2) {
batch_ptr(0) = 0;
++prev_batch;
for (int64_t i = 0; i < total_nnz; ++i) {
int64_t row = indices(i, 0);
if (row < 0 || row >= num_rows) {
return errors::InvalidArgument("Row index ", row,
" is outside of valid range [0, ",
num_rows, ")");
}
int64_t col = indices(i, 1);
if (col < 0 || col >= num_cols) {
return errors::InvalidArgument("Column index ", col,
" is outside of valid range [0, ",
num_cols, ")");
}
int64_t ix = row + 1;
if (ix >= csr_row_ptr.size()) {
return errors::InvalidArgument("Got an index ", ix,
" that is outside of csr_row_ptr");
}
csr_row_ptr(ix) += 1;
csr_col_ind(i) = col;
}
} else {
for (int64_t i = 0; i < total_nnz; ++i) {
const int cur_batch = indices(i, 0);
if (cur_batch < 0 || cur_batch >= batch_size) {
return errors::InvalidArgument("Batch index ", cur_batch,
" is outside of valid range [0, ",
batch_size, ")");
}
int64_t row = indices(i, 1);
if (row < 0 || row >= num_rows) {
return errors::InvalidArgument("Row index ", row,
" is outside of valid range [0, ",
num_rows, ")");
}
int64_t col = indices(i, 2);
if (col < 0 || col >= num_cols) {
return errors::InvalidArgument("Column index ", col,
" is outside of valid range [0, ",
num_cols, ")");
}
int64_t ix = cur_batch * (num_rows + 1) + row + 1;
if (ix >= csr_row_ptr.size()) {
return errors::InvalidArgument("Got an index ", ix,
" that is outside of csr_row_ptr");
}
csr_row_ptr(ix) += 1;
csr_col_ind(i) = col;
while (prev_batch < cur_batch) {
batch_ptr(prev_batch + 1) = i;
++prev_batch;
}
}
}
while (prev_batch < batch_size) {
batch_ptr(prev_batch + 1) = total_nnz;
++prev_batch;
}
for (int batch_idx = 0; batch_idx < batch_size; ++batch_idx) {
auto* row_ptr_batch = csr_row_ptr.data() + batch_idx * (num_rows + 1);
std::partial_sum(row_ptr_batch, row_ptr_batch + num_rows + 1,
row_ptr_batch);
}
return absl::OkStatus();
}
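// Worked example (rank 2, batch_size 1, num_rows 4): COO indices
// {(0,0), (2,3), (2,4), (3,0)} bump csr_row_ptr to [0, 1, 0, 2, 1]; the
// partial sum above then yields the final row pointers [0, 1, 1, 3, 4] with
// csr_col_ind = [0, 3, 4, 0], matching the single-batch unit test below.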
}
} | #include "tensorflow/core/kernels/sparse/kernels.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace {
TEST(SparseTensorToCSRSparseMatrix, SingleBatchConversion) {
const auto indices =
test::AsTensor<int64_t>({0, 0, 2, 3, 2, 4, 3, 0}, TensorShape({4, 2}));
Tensor batch_ptr(DT_INT32, {2});
Tensor csr_col_ind(DT_INT32, {4});
auto csr_row_ptr = test::AsTensor<int32>({0, 0, 0, 0, 0});
functor::SparseTensorToCSRSparseMatrixCPUFunctor coo_to_csr;
TF_EXPECT_OK(coo_to_csr(1, 4, 5,
indices.template matrix<int64_t>(),
batch_ptr.vec<int32>(), csr_row_ptr.vec<int32>(),
csr_col_ind.vec<int32>()));
test::ExpectTensorEqual<int32>(batch_ptr, test::AsTensor<int32>({0, 4}));
test::ExpectTensorEqual<int32>(csr_row_ptr,
test::AsTensor<int32>({0, 1, 1, 3, 4}));
test::ExpectTensorEqual<int32>(csr_col_ind,
test::AsTensor<int32>({0, 3, 4, 0}));
}
TEST(SparseTensorToCSRSparseMatrix, BatchConversion) {
const auto indices = test::AsTensor<int64_t>({0, 0, 0,
0, 2, 3,
2, 0, 1},
TensorShape({3, 3}));
Tensor batch_ptr(DT_INT32, {4});
Tensor csr_col_ind(DT_INT32, {3});
Tensor csr_row_ptr(DT_INT32, {12});
test::FillFn<int32>(&csr_row_ptr, [](int unused) { return 0; });
functor::SparseTensorToCSRSparseMatrixCPUFunctor coo_to_csr;
TF_EXPECT_OK(coo_to_csr(3, 3, 4,
indices.template matrix<int64_t>(),
batch_ptr.vec<int32>(), csr_row_ptr.vec<int32>(),
csr_col_ind.vec<int32>()));
test::ExpectTensorEqual<int32>(batch_ptr,
test::AsTensor<int32>({0, 2, 2, 3}));
test::ExpectTensorEqual<int32>(csr_row_ptr,
test::AsTensor<int32>({0, 1, 1, 2,
0, 0, 0, 0,
0, 1, 1, 1}));
test::ExpectTensorEqual<int32>(csr_col_ind, test::AsTensor<int32>({0, 3, 1}));
}
TEST(SparseTensorToCSRSparseMatrix, InvalidBatchThrowsIllegalArgument) {
const auto indices =
test::AsTensor<int64_t>({0, 0, 0,
4, 2, 3,
2, 0, 1},
TensorShape({3, 3}));
Tensor batch_ptr(DT_INT32, {4});
Tensor csr_col_ind(DT_INT32, {3});
Tensor csr_row_ptr(DT_INT32, {12});
test::FillFn<int32>(&csr_row_ptr, [](int unused) { return 0; });
functor::SparseTensorToCSRSparseMatrixCPUFunctor coo_to_csr;
EXPECT_THAT(
coo_to_csr(3, 3, 4,
indices.template matrix<int64_t>(), batch_ptr.vec<int32>(),
csr_row_ptr.vec<int32>(), csr_col_ind.vec<int32>()),
tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"Batch index .* is outside of valid range")));
}
TEST(SparseTensorToCSRSparseMatrix, InvalidRowThrowsIllegalArgument) {
const auto indices = test::AsTensor<int64_t>({0, 0, 0,
1, 4, 3,
2, 0, 1},
TensorShape({3, 3}));
Tensor batch_ptr(DT_INT32, {4});
Tensor csr_col_ind(DT_INT32, {3});
Tensor csr_row_ptr(DT_INT32, {12});
test::FillFn<int32>(&csr_row_ptr, [](int unused) { return 0; });
functor::SparseTensorToCSRSparseMatrixCPUFunctor coo_to_csr;
EXPECT_THAT(
coo_to_csr(3, 3, 4,
indices.template matrix<int64_t>(), batch_ptr.vec<int32>(),
csr_row_ptr.vec<int32>(), csr_col_ind.vec<int32>()),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Row index .* is outside of valid range")));
}
TEST(SparseTensorToCSRSparseMatrix, InvalidColThrowsIllegalArgument) {
const auto indices = test::AsTensor<int64_t>({0, 0, 0,
1, 2, 6,
2, 0, 1},
TensorShape({3, 3}));
Tensor batch_ptr(DT_INT32, {4});
Tensor csr_col_ind(DT_INT32, {3});
Tensor csr_row_ptr(DT_INT32, {12});
test::FillFn<int32>(&csr_row_ptr, [](int unused) { return 0; });
functor::SparseTensorToCSRSparseMatrixCPUFunctor coo_to_csr;
EXPECT_THAT(
coo_to_csr(3, 3, 4,
indices.template matrix<int64_t>(), batch_ptr.vec<int32>(),
csr_row_ptr.vec<int32>(), csr_col_ind.vec<int32>()),
tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"Column index .* is outside of valid range")));
}
TEST(SparseTensorToCSRSparseMatrix, InvalidRankIllegalArgument) {
const auto indices =
test::AsTensor<int64_t>({0, 0, 0, 0,
1, 2, 2, 3,
2, 0, 1, 2},
TensorShape({3, 4}));
Tensor batch_ptr(DT_INT32, {4});
Tensor csr_col_ind(DT_INT32, {3});
Tensor csr_row_ptr(DT_INT32, {12});
test::FillFn<int32>(&csr_row_ptr, [](int unused) { return 0; });
functor::SparseTensorToCSRSparseMatrixCPUFunctor coo_to_csr;
EXPECT_THAT(
coo_to_csr(3, 3, 4,
indices.template matrix<int64_t>(), batch_ptr.vec<int32>(),
csr_row_ptr.vec<int32>(), csr_col_ind.vec<int32>()),
tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"Indices must have either 2 or 3 columns.")));
}
}
}
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} |
1,248 | cpp | tensorflow/tensorflow | grappler_test | tensorflow/core/grappler/utils/grappler_test.cc | tensorflow/core/grappler/utils/grappler_test_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_GRAPPLER_TEST_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_GRAPPLER_TEST_H_
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace grappler {
class GrapplerTest : public ::testing::Test {
public:
GrapplerTest();
protected:
void DisableAllOptimizers();
void EnableAllOptimizers();
std::vector<Tensor> EvaluateNodes(
const GraphDef& graph, const std::vector<string>& node_names) const;
std::vector<Tensor> EvaluateNodes(
const GraphDef& graph, const std::vector<string>& node_names,
const std::vector<std::pair<string, Tensor>>& inputs) const;
std::vector<Tensor> EvaluateFetchNodes(const GrapplerItem& item) const;
NodeDef* AddNode(const string& name, const string& op,
const std::vector<string>& inputs,
const std::vector<std::pair<string, AttrValue>>& attributes,
GraphDef* graph) const;
void DisableAllOptimizers(RewriterConfig* cfg);
void CompareGraphs(GraphDef want, GraphDef got) const;
void CompareNodes(const NodeDef& want, const NodeDef& got) const;
void CompareFunctions(FunctionDef want, FunctionDef got) const;
bool IsNodesDirectlyConnected(const NodeMap& node_map, const string& src,
const string& dst, int position = 0);
int CountOpNodes(const GraphDef& graph, const string& op);
template <DataType DTYPE>
Tensor GenerateRandomTensor(const TensorShape& shape) const {
typedef typename EnumToDataType<DTYPE>::Type T;
Tensor tensor(DTYPE, shape);
for (auto i = 0; i < tensor.NumElements(); i++)
tensor.flat<T>()(i) = i + random::New64() % 10;
return tensor;
}
template <DataType DTYPE>
Tensor GenerateTensorWithSetRandom(const TensorShape& shape) const {
typedef typename EnumToDataType<DTYPE>::Type T;
Tensor tensor(DTYPE, shape);
tensor.flat<T>().setRandom();
return tensor;
}
template <DataType DTYPE>
Tensor GenerateConstantTensor(
const TensorShape& shape,
typename EnumToDataType<DTYPE>::Type value) const {
typedef typename EnumToDataType<DTYPE>::Type T;
Tensor tensor(DTYPE, shape);
for (auto i = 0; i < tensor.NumElements(); i++) tensor.flat<T>()(i) = value;
return tensor;
}
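  // Example usage (hypothetical values):
  // GenerateConstantTensor<DT_FLOAT>({2, 2}, 1.0f) yields a 2x2 tensor of
  // ones, while GenerateRandomTensor<DT_INT32>({4}) yields increasing values
  // with per-element noise drawn from [0, 10).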
inline tensorflow::Scope CreateScopeWithDevice(absl::string_view device) {
return tensorflow::Scope::NewRootScope().WithDevice(string(device));
}
private:
SessionOptions options_;
};
}
}
#endif
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include <memory>
#include "absl/algorithm/container.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace grappler {
namespace {
void CompareGraphNodes(protobuf::RepeatedPtrField<NodeDef>* want,
protobuf::RepeatedPtrField<NodeDef>* got) {
auto comparator = [](const NodeDef& n1, const NodeDef& n2) -> bool {
return n1.name() < n2.name();
};
std::sort(want->begin(), want->end(), comparator);
std::sort(got->begin(), got->end(), comparator);
ASSERT_EQ(want->size(), got->size());
for (int i = 0; i < want->size(); ++i) {
NodeDef& want_node = (*want)[i];
NodeDef& got_node = (*got)[i];
EXPECT_EQ(want_node.op(), got_node.op());
EXPECT_EQ(want_node.name(), got_node.name());
EXPECT_EQ(want_node.device(), got_node.device());
ASSERT_EQ(want_node.input_size(), got_node.input_size())
<< "want_node =\n"
<< want_node.DebugString() << "\ngot_node =\n"
<< got_node.DebugString();
const auto is_control = [](const string& input) -> bool {
return ParseTensorName(input).index() < 0;
};
auto want_inputs = want_node.mutable_input();
auto got_inputs = got_node.mutable_input();
std::sort(absl::c_find_if(*want_inputs, is_control), want_inputs->end());
std::sort(absl::c_find_if(*got_inputs, is_control), got_inputs->end());
for (int j = 0; j < want_node.input_size(); ++j) {
const TensorId want_tensor = ParseTensorName(want_node.input(j));
const TensorId got_tensor = ParseTensorName(got_node.input(j));
EXPECT_EQ(want_tensor.ToString(), got_tensor.ToString());
}
}
}
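// Note: data inputs are compared position by position, but control inputs
// (which c_find_if locates above) are sorted first on both sides, so their
// relative order is deliberately ignored by the comparison.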
void SetAllOptimizers(RewriterConfig* cfg, RewriterConfig::Toggle value) {
cfg->set_arithmetic_optimization(value);
cfg->set_auto_mixed_precision(value);
cfg->set_auto_mixed_precision_onednn_bfloat16(value);
cfg->set_common_subgraph_elimination(value);
cfg->set_constant_folding(value);
cfg->set_debug_stripper(value);
cfg->set_dependency_optimization(value);
cfg->set_function_optimization(value);
cfg->set_implementation_selector(value);
cfg->set_layout_optimizer(value);
cfg->set_loop_optimization(value);
cfg->set_pin_to_host_optimization(value);
cfg->set_remapping(value);
cfg->set_scoped_allocator_optimization(value);
cfg->set_shape_optimization(value);
}
}
GrapplerTest::GrapplerTest() {
DisableAllOptimizers();
}
void GrapplerTest::DisableAllOptimizers() {
SetAllOptimizers(
options_.config.mutable_graph_options()->mutable_rewrite_options(),
RewriterConfig::OFF);
}
void GrapplerTest::EnableAllOptimizers() {
SetAllOptimizers(
options_.config.mutable_graph_options()->mutable_rewrite_options(),
RewriterConfig::ON);
}
std::vector<Tensor> GrapplerTest::EvaluateNodes(
const GraphDef& graph, const std::vector<string>& node_names) const {
return EvaluateNodes(graph, node_names, {});
}
std::vector<Tensor> GrapplerTest::EvaluateNodes(
const GraphDef& graph, const std::vector<string>& node_names,
const std::vector<std::pair<string, Tensor>>& inputs) const {
std::unique_ptr<tensorflow::Session> session(NewSession(options_));
TF_CHECK_OK(session->Create(graph));
RunOptions run_options;
std::vector<Tensor> output_tensors;
TF_CHECK_OK(session->Run(run_options, inputs, node_names, node_names,
&output_tensors, nullptr));
TF_CHECK_OK(session->Close());
return output_tensors;
}
std::vector<Tensor> GrapplerTest::EvaluateFetchNodes(
const GrapplerItem& item) const {
std::unique_ptr<tensorflow::Session> session(NewSession(options_));
TF_CHECK_OK(session->Create(item.graph));
RunOptions run_options;
if (!item.init_ops.empty()) {
std::vector<Tensor> dummy;
TF_CHECK_OK(
session->Run(run_options, {}, {}, item.init_ops, &dummy, nullptr));
}
std::vector<Tensor> output_tensors;
TF_CHECK_OK(session->Run(run_options, item.feed, item.fetch, {},
&output_tensors, nullptr));
TF_CHECK_OK(session->Close());
return output_tensors;
}
NodeDef* GrapplerTest::AddNode(
const string& name, const string& op, const std::vector<string>& inputs,
const std::vector<std::pair<string, AttrValue>>& attributes,
GraphDef* graph) const {
NodeDef* node = graph->add_node();
node->set_name(name);
node->set_op(op);
for (const string& input : inputs) {
node->add_input(input);
}
for (auto attr : attributes) {
(*node->mutable_attr())[attr.first] = attr.second;
}
return node;
}
void GrapplerTest::CompareGraphs(GraphDef want, GraphDef got) const {
CompareGraphNodes(want.mutable_node(), got.mutable_node());
}
void GrapplerTest::CompareFunctions(FunctionDef want, FunctionDef got) const {
CompareGraphNodes(want.mutable_node_def(), got.mutable_node_def());
}
void GrapplerTest::CompareNodes(const NodeDef& want, const NodeDef& got) const {
EXPECT_EQ(want.name(), got.name());
EXPECT_EQ(want.op(), got.op());
std::vector<string> want_inputs(want.input().begin(), want.input().end());
std::vector<string> got_inputs(got.input().begin(), got.input().end());
EXPECT_EQ(want_inputs, got_inputs);
const auto attr_name = [](const std::pair<const string, AttrValue>& attr) {
return attr.first;
};
std::vector<string> want_attrs;
std::vector<string> got_attrs;
absl::c_transform(want.attr(), std::back_inserter(want_attrs), attr_name);
absl::c_transform(got.attr(), std::back_inserter(got_attrs), attr_name);
absl::c_sort(want_attrs);
absl::c_sort(got_attrs);
EXPECT_EQ(want_attrs, got_attrs);
for (const string& attr : want_attrs) {
EXPECT_TRUE(AreAttrValuesEqual(want.attr().at(attr), got.attr().at(attr)));
}
}
bool GrapplerTest::IsNodesDirectlyConnected(const NodeMap& node_map,
const string& src,
const string& dst, int position) {
const NodeDef* src_node = node_map.GetNode(src);
const NodeDef* dst_node = node_map.GetNode(dst);
EXPECT_TRUE(src_node != nullptr) << src << " node not found";
EXPECT_TRUE(dst_node != nullptr) << dst << " node not found";
return src_node && dst_node && dst_node->input(position) == src_node->name();
}
int GrapplerTest::CountOpNodes(const GraphDef& graph, const string& op) {
return std::count_if(graph.node().begin(), graph.node().end(),
[&op](const NodeDef& node) { return node.op() == op; });
}
}
} | #include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class GrapplerTestTest : public GrapplerTest {};
TEST_F(GrapplerTestTest, CompareIdenticalGraphs) {
tensorflow::Scope s1 = tensorflow::Scope::NewRootScope();
auto s1_a = ops::Variable(s1.WithOpName("a"), {2, 2}, DT_FLOAT);
auto s1_b = ops::Variable(s1.WithOpName("b"), {2, 2}, DT_FLOAT);
auto s1_add = ops::Add(s1.WithOpName("Add_1"), s1_a, s1_b);
tensorflow::Scope s2 = tensorflow::Scope::NewRootScope();
auto s2_a = ops::Variable(s2.WithOpName("a"), {2, 2}, DT_FLOAT);
auto s2_b = ops::Variable(s2.WithOpName("b"), {2, 2}, DT_FLOAT);
auto s2_add = ops::Add(s2.WithOpName("Add_1"), s2_a, s2_b);
GraphDef graph1;
TF_ASSERT_OK(s1.ToGraphDef(&graph1));
GraphDef graph2;
TF_ASSERT_OK(s2.ToGraphDef(&graph2));
CompareGraphs(graph1, graph2);
}
TEST_F(GrapplerTestTest, CheckNodesConnectivity) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto add_1 = ops::Add(s.WithOpName("Add_1"), a, b);
auto add_2 = ops::Add(s.WithOpName("Add_2"), add_1, b);
GraphDef graph;
TF_ASSERT_OK(s.ToGraphDef(&graph));
NodeMap node_map(&graph);
EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "a", "Add_1", 0));
EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "b", "Add_1", 1));
EXPECT_FALSE(IsNodesDirectlyConnected(node_map, "a", "Add_2", 0));
EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "b", "Add_2", 1));
}
TEST_F(GrapplerTestTest, CountOpNodes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto c = ops::Variable(s.WithOpName("c"), {2, 2}, DT_FLOAT);
auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
auto add_bc = ops::Add(s.WithOpName("Add_bc"), b, c);
auto mul_ab = ops::Mul(s.WithOpName("Mull_ab"), a, b);
auto mul_bc = ops::Mul(s.WithOpName("Mull_bc"), a, b);
InputList inputs{
Output(add_ab),
Output(add_bc),
Output(mul_ab),
Output(mul_bc),
};
auto add_all = ops::AddN(s.WithOpName("Add_all"), inputs);
GraphDef graph;
TF_ASSERT_OK(s.ToGraphDef(&graph));
EXPECT_EQ(2, CountOpNodes(graph, "Add"));
EXPECT_EQ(2, CountOpNodes(graph, "Mul"));
EXPECT_EQ(1, CountOpNodes(graph, "AddN"));
EXPECT_EQ(0, CountOpNodes(graph, "Transpose"));
}
TEST_F(GrapplerTestTest, EvaluateNodes) {
EnableAllOptimizers();
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
Output b = ops::Const(s.WithOpName("d"), {3.0f, 4.0f}, {1, 2});
Output mul = ops::Mul(s.WithOpName("mul"), a, b);
GrapplerItem item;
item.fetch = {"mul"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors.size(), 1);
EXPECT_EQ(tensors[0].flat<float>()(0), 3.0f);
EXPECT_EQ(tensors[0].flat<float>()(1), 8.0f);
}
TEST_F(GrapplerTestTest, EvaluateNodesInvalidFetch) {
EnableAllOptimizers();
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
Output b = ops::Const(s.WithOpName("d"), {3.0f, 4.0f}, {1, 2});
Output mul = ops::Mul(s.WithOpName("mul"), a, b);
GrapplerItem item;
item.fetch = {"no_such_node"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_DEATH(EvaluateNodes(item.graph, item.fetch),
"Tensor no_such_node:0, specified in either "
"feed_devices or fetch_devices was not found in the Graph");
}
}
}
} |
1,249 | cpp | tensorflow/tensorflow | grappler | tensorflow/c/experimental/grappler/grappler.cc | tensorflow/core/grappler/utils/grappler_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_GRAPPLER_GRAPPLER_H_
#define TENSORFLOW_C_EXPERIMENTAL_GRAPPLER_GRAPPLER_H_
#include <stddef.h>
#include <stdint.h>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/tf_status.h"
#define GO_MAJOR 0
#define GO_MINOR 0
#define GO_PATCH 1
#ifdef __cplusplus
extern "C" {
#endif
typedef enum TF_TriState {
TF_TriState_Default = 0,
TF_TriState_Off,
TF_TriState_On,
} TF_TriState;
typedef struct TF_GrapplerItem TF_GrapplerItem;
typedef struct TP_OptimizerConfigs {
size_t struct_size;
void* ext;
TF_TriState disable_model_pruning;
TF_TriState implementation_selector;
TF_TriState function_optimization;
TF_TriState common_subgraph_elimination;
TF_TriState arithmetic_optimization;
TF_TriState debug_stripper;
TF_TriState constant_folding;
TF_TriState shape_optimization;
TF_TriState auto_mixed_precision;
TF_TriState auto_mixed_precision_onednn_bfloat16;
TF_TriState auto_mixed_precision_mkl;
TF_TriState pin_to_host_optimization;
TF_TriState layout_optimizer;
TF_TriState remapping;
TF_TriState loop_optimization;
TF_TriState dependency_optimization;
TF_TriState auto_parallel;
TF_TriState memory_optimization;
TF_TriState scoped_allocator_optimization;
} TP_OptimizerConfigs;
#define TP_OPTIMIZER_CONFIGS_STRUCT_SIZE \
TF_OFFSET_OF_END(TP_OptimizerConfigs, scoped_allocator_optimization)
typedef struct TP_Optimizer {
size_t struct_size;
void* ext;
void* (*create_func)();
void (*optimize_func)(void*, const TF_Buffer*, const TF_GrapplerItem*,
TF_Buffer*, TF_Status*);
void (*destroy_func)(void*);
} TP_Optimizer;
#define TP_OPTIMIZER_STRUCT_SIZE TF_OFFSET_OF_END(TP_Optimizer, destroy_func)
typedef struct TP_OptimizerRegistrationParams {
size_t struct_size;
void* ext;
int32_t major_version;
int32_t minor_version;
int32_t patch_version;
const char* device_type;
TP_OptimizerConfigs* optimizer_configs;
TP_Optimizer* optimizer;
} TP_OptimizerRegistrationParams;
#define TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE \
TF_OFFSET_OF_END(TP_OptimizerRegistrationParams, optimizer)
void TF_InitGraph(TP_OptimizerRegistrationParams* params, TF_Status* status);
TF_CAPI_EXPORT extern void TF_GetNodesToPreserveListSize(
const TF_GrapplerItem* item, int* num_values, size_t* storage_size,
TF_Status* status);
TF_CAPI_EXPORT extern void TF_GetNodesToPreserveList(
const TF_GrapplerItem* item, char** values, size_t* lengths, int num_values,
void* storage, size_t storage_size, TF_Status* status);
TF_CAPI_EXPORT extern void TF_GetFetchNodesListSize(const TF_GrapplerItem* item,
int* num_values,
size_t* storage_size,
TF_Status* status);
TF_CAPI_EXPORT extern void TF_GetFetchNodesList(const TF_GrapplerItem* item,
char** values, size_t* lengths,
int num_values, void* storage,
size_t storage_size,
TF_Status* status);
typedef struct TF_GraphProperties TF_GraphProperties;
TF_CAPI_EXPORT extern TF_GraphProperties* TF_NewGraphProperties(
const TF_GrapplerItem* item);
TF_CAPI_EXPORT extern void TF_DeleteGraphProperties(
TF_GraphProperties* graph_properties);
TF_CAPI_EXPORT extern void TF_InferStatically(
TF_GraphProperties* graph_properties, TF_Bool assume_valid_feeds,
TF_Bool aggressive_shape_inference, TF_Bool include_input_tensor_values,
TF_Bool include_output_tensor_values, TF_Status* s);
TF_CAPI_EXPORT extern void TF_GetInputPropertiesListSize(
TF_GraphProperties* graph_properties, const char* name, int* num_values,
TF_Status* status);
TF_CAPI_EXPORT extern void TF_GetOutputPropertiesListSize(
TF_GraphProperties* graph_properties, const char* name, int* num_values,
TF_Status* status);
TF_CAPI_EXPORT extern void TF_GetInputPropertiesList(
TF_GraphProperties* graph_properties, const char* name,
TF_Buffer** properties, int num_values, TF_Status* status);
TF_CAPI_EXPORT extern void TF_GetOutputPropertiesList(
TF_GraphProperties* graph_properties, const char* name,
TF_Buffer** properties, int num_values, TF_Status* status);
typedef struct TF_FunctionLibraryDefinition TF_FunctionLibraryDefinition;
TF_CAPI_EXPORT extern TF_FunctionLibraryDefinition*
TF_NewFunctionLibraryDefinition(const TF_Buffer* graph_buf, TF_Status* status);
TF_CAPI_EXPORT extern void TF_DeleteFunctionLibraryDefinition(
TF_FunctionLibraryDefinition* fn_lib);
TF_CAPI_EXPORT extern void TF_LookUpOpDef(TF_FunctionLibraryDefinition* fn_lib,
const char* name, TF_Buffer* buf,
TF_Status* s);
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/c/experimental/grappler/grappler.h"
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/experimental/grappler/grappler_internal.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_buffer_internal.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
namespace {
#define VALIDATE_STRUCT_SIZE(STRUCT_NAME, STRUCT_OBJ, SIZE_VALUE_NAME) \
do { \
if (STRUCT_OBJ.struct_size == 0) { \
return absl::Status(absl::StatusCode::kFailedPrecondition, \
"struct_size field in " #STRUCT_NAME \
" must be set to " #SIZE_VALUE_NAME "."); \
} \
} while (0)
#define VALIDATE_MEMBER(STRUCT_NAME, STRUCT_OBJ, NAME) \
do { \
if (STRUCT_OBJ.NAME == 0) { \
return absl::Status(absl::StatusCode::kFailedPrecondition, \
"'" #NAME "' field in " #STRUCT_NAME \
" must be set."); \
} \
} while (0)
absl::Status ValidateTPOptimizerRegistrationParams(
const TP_OptimizerRegistrationParams& params) {
VALIDATE_STRUCT_SIZE(TP_OptimizerRegistrationParams, params,
TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE);
VALIDATE_MEMBER(TP_OptimizerRegistrationParams, params, device_type);
return absl::OkStatus();
}
absl::Status ValidateTPOptimizer(const TP_Optimizer& optimizer) {
VALIDATE_STRUCT_SIZE(TP_Optimizer, optimizer, TP_OPTIMIZER_STRUCT_SIZE);
VALIDATE_MEMBER(TP_Optimizer, optimizer, optimize_func);
return absl::OkStatus();
}
absl::Status ValidateTPOptimizerConfigs(const TP_OptimizerConfigs& configs) {
VALIDATE_STRUCT_SIZE(TP_OptimizerConfigs, configs,
TP_OPTIMIZER_CONFIGS_STRUCT_SIZE);
return absl::OkStatus();
}
#undef VALIDATE_MEMBER
#undef VALIDATE_STRUCT_SIZE
}
namespace tensorflow {
namespace grappler {
Status CGraphOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph_def) {
OwnedTFStatus c_status(TF_NewStatus());
OwnedTFBuffer graph_buf(TF_NewBuffer());
OwnedTFBuffer optimized_graph_buf(TF_NewBuffer());
TF_RETURN_IF_ERROR(MessageToBuffer(item.graph, graph_buf.get()));
optimizer_.optimize_func(c_optimizer_, graph_buf.get(),
reinterpret_cast<const TF_GrapplerItem*>(&item),
optimized_graph_buf.get(), c_status.get());
TF_RETURN_IF_ERROR(tsl::StatusFromTF_Status(c_status.get()));
TF_RETURN_IF_ERROR(
BufferToMessage(optimized_graph_buf.get(), optimized_graph_def));
return absl::OkStatus();
}
#define CONFIG_TOGGLE(optimizer) \
if (tp_configs.optimizer == TF_TriState_Off) \
configs.toggle_config[#optimizer] = RewriterConfig::OFF; \
else \
configs.toggle_config[#optimizer] = RewriterConfig::ON;
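// Example expansion: CONFIG_TOGGLE(remapping) becomes
//   if (tp_configs.remapping == TF_TriState_Off)
//     configs.toggle_config["remapping"] = RewriterConfig::OFF;
//   else
//     configs.toggle_config["remapping"] = RewriterConfig::ON;
// i.e. TF_TriState_Default is treated the same as TF_TriState_On here.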
void CGraphOptimizerRegister(
const PluginGraphOptimizerRegistry::Creator& creator,
const TP_OptimizerConfigs tp_configs, const char* device_type) {
ConfigList configs;
if (tp_configs.disable_model_pruning == TF_TriState_On)
configs.disable_model_pruning = true;
else
configs.disable_model_pruning = false;
CONFIG_TOGGLE(implementation_selector);
CONFIG_TOGGLE(function_optimization);
CONFIG_TOGGLE(common_subgraph_elimination);
CONFIG_TOGGLE(arithmetic_optimization);
CONFIG_TOGGLE(debug_stripper);
CONFIG_TOGGLE(constant_folding);
CONFIG_TOGGLE(shape_optimization);
CONFIG_TOGGLE(auto_mixed_precision);
CONFIG_TOGGLE(auto_mixed_precision_onednn_bfloat16);
CONFIG_TOGGLE(auto_mixed_precision_mkl);
CONFIG_TOGGLE(pin_to_host_optimization);
CONFIG_TOGGLE(layout_optimizer);
CONFIG_TOGGLE(remapping);
CONFIG_TOGGLE(loop_optimization);
CONFIG_TOGGLE(dependency_optimization);
CONFIG_TOGGLE(auto_parallel);
CONFIG_TOGGLE(memory_optimization);
CONFIG_TOGGLE(scoped_allocator_optimization);
PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(
creator, device_type, configs);
}
#undef CONFIG_TOGGLE
absl::Status InitGraphPlugin(void* dso_handle) {
tsl::Env* env = tsl::Env::Default();
void* dso_symbol;
TF_RETURN_IF_ERROR(
env->GetSymbolFromLibrary(dso_handle, "TF_InitGraph", &dso_symbol));
auto init_fn = reinterpret_cast<TFInitGraphPluginFn>(dso_symbol);
return InitGraphPlugin(init_fn);
}
absl::Status InitGraphPlugin(TFInitGraphPluginFn init_fn) {
TP_OptimizerRegistrationParams params{
TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE};
TP_Optimizer optimizer{TP_OPTIMIZER_STRUCT_SIZE};
TP_OptimizerConfigs optimizer_configs{TP_OPTIMIZER_CONFIGS_STRUCT_SIZE};
params.major_version = GO_MAJOR;
params.minor_version = GO_MINOR;
params.patch_version = GO_PATCH;
params.optimizer = &optimizer;
params.optimizer_configs = &optimizer_configs;
OwnedTFStatus c_status(TF_NewStatus());
init_fn(¶ms, c_status.get());
TF_RETURN_IF_ERROR(tsl::StatusFromTF_Status(c_status.get()));
TF_RETURN_IF_ERROR(ValidateTPOptimizerRegistrationParams(params));
TF_RETURN_IF_ERROR(ValidateTPOptimizer(optimizer));
TF_RETURN_IF_ERROR(ValidateTPOptimizerConfigs(optimizer_configs));
CGraphOptimizerRegister(
[=]() { return new CGraphOptimizer(optimizer, params.device_type); },
optimizer_configs, params.device_type);
return absl::OkStatus();
}
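// A minimal sketch of the TF_InitGraph a plugin would export (hypothetical
// callback names; the struct-size constants and version macros are the ones
// defined in the header above):
//
//   void TF_InitGraph(TP_OptimizerRegistrationParams* params, TF_Status* s) {
//     params->struct_size = TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE;
//     params->device_type = "MY_DEVICE";                  // hypothetical
//     params->optimizer->struct_size = TP_OPTIMIZER_STRUCT_SIZE;
//     params->optimizer->optimize_func = MyOptimizeFunc;  // hypothetical
//     params->optimizer_configs->struct_size = TP_OPTIMIZER_CONFIGS_STRUCT_SIZE;
//   }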
}
}
void TF_GetNodesToPreserveListSize(const TF_GrapplerItem* item, int* num_values,
size_t* storage_size, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const std::unordered_set<std::string>& nodes =
reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item)
->NodesToPreserve();
*num_values = nodes.size();
*storage_size = 0;
for (const std::string& str : nodes) {
*storage_size += str.size();
}
}
void TF_GetNodesToPreserveList(const TF_GrapplerItem* item, char** values,
size_t* lengths, int num_values, void* storage,
size_t storage_size, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const std::unordered_set<std::string>& nodes =
reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item)
->NodesToPreserve();
char* p = static_cast<char*>(storage);
int index = 0;
for (const std::string& s : nodes) {
if (index >= num_values) break;
values[index] = p;
lengths[index] = s.size();
if ((p + s.size()) > (static_cast<char*>(storage) + storage_size)) {
tsl::Set_TF_Status_from_Status(
status,
absl::InvalidArgumentError(
"Not enough storage to hold the requested list of nodes"));
return;
}
memcpy(values[index], s.data(), s.size());
p += s.size();
index++;
}
}
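// Typical two-call protocol for the list getters in this file (sketch; the
// caller owns all buffers):
//
//   int n;
//   size_t bytes;
//   TF_GetNodesToPreserveListSize(item, &n, &bytes, status);
//   std::vector<char*> values(n);
//   std::vector<size_t> lengths(n);
//   std::vector<char> storage(bytes);
//   TF_GetNodesToPreserveList(item, values.data(), lengths.data(), n,
//                             storage.data(), bytes, status);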
void TF_GetFetchNodesListSize(const TF_GrapplerItem* item, int* num_values,
size_t* storage_size, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const std::vector<std::string>& nodes =
reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item)->fetch;
*num_values = nodes.size();
*storage_size = 0;
for (const std::string& str : nodes) {
*storage_size += str.size();
}
}
void TF_GetFetchNodesList(const TF_GrapplerItem* item, char** values,
size_t* lengths, int num_values, void* storage,
size_t storage_size, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const std::vector<std::string>& nodes =
reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item)->fetch;
const int len = std::min(num_values, static_cast<int>(nodes.size()));
char* p = static_cast<char*>(storage);
for (int index = 0; index < len; ++index) {
const std::string& s = nodes[index];
values[index] = p;
lengths[index] = s.size();
if ((p + s.size()) > (static_cast<char*>(storage) + storage_size)) {
tsl::Set_TF_Status_from_Status(
status,
absl::InvalidArgumentError(
"Not enough storage to hold the requested list of nodes"));
return;
}
memcpy(values[index], s.data(), s.size());
p += s.size();
}
}
TF_GraphProperties* TF_NewGraphProperties(const TF_GrapplerItem* item) {
return reinterpret_cast<TF_GraphProperties*>(
new tensorflow::grappler::GraphProperties(
*reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item)));
}
void TF_DeleteGraphProperties(TF_GraphProperties* graph_properties) {
if (graph_properties == nullptr) return;
delete reinterpret_cast<tensorflow::grappler::GraphProperties*>(
graph_properties);
}
void TF_InferStatically(TF_GraphProperties* graph_properties,
TF_Bool assume_valid_feeds,
TF_Bool aggressive_shape_inference,
TF_Bool include_input_tensor_values,
TF_Bool include_output_tensor_values,
TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
absl::Status s =
reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties)
->InferStatically(assume_valid_feeds, aggressive_shape_inference,
include_input_tensor_values,
include_output_tensor_values);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
}
}
void TF_GetInputPropertiesListSize(TF_GraphProperties* graph_properties,
const char* name, int* num_values,
TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
*num_values =
reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties)
->GetInputProperties(name)
.size();
}
void TF_GetOutputPropertiesListSize(TF_GraphProperties* graph_properties,
const char* name, int* num_values,
TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
*num_values =
reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties)
->GetOutputProperties(name)
.size();
}
void TF_GetInputPropertiesList(TF_GraphProperties* graph_properties,
const char* name, TF_Buffer** properties,
int num_values, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const std::vector<tensorflow::OpInfo::TensorProperties>& tensor_properties =
reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties)
->GetInputProperties(name);
const int len =
std::min(num_values, static_cast<int>(tensor_properties.size()));
for (int i = 0; i < len; ++i) {
absl::Status s =
tensorflow::MessageToBuffer(tensor_properties[i], properties[i]);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return;
}
}
}
void TF_GetOutputPropertiesList(TF_GraphProperties* graph_properties,
const char* name, TF_Buffer** properties,
int num_values, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const std::vector<tensorflow::OpInfo::TensorProperties>& tensor_properties =
reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties)
->GetOutputProperties(name);
const int len =
std::min(num_values, static_cast<int>(tensor_properties.size()));
for (int i = 0; i < len; ++i) {
absl::Status s =
tensorflow::MessageToBuffer(tensor_properties[i], properties[i]);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return;
}
}
}
TF_FunctionLibraryDefinition* TF_NewFunctionLibraryDefinition(
const TF_Buffer* graph_buf, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
tensorflow::GraphDef graph_def;
absl::Status s = tensorflow::BufferToMessage(graph_buf, &graph_def);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return nullptr;
}
return reinterpret_cast<TF_FunctionLibraryDefinition*>(
new tensorflow::FunctionLibraryDefinition(
tensorflow::OpRegistry::Global(), graph_def.library()));
}
void TF_DeleteFunctionLibraryDefinition(TF_FunctionLibraryDefinition* fn_lib) {
if (fn_lib == nullptr) return;
delete reinterpret_cast<tensorflow::FunctionLibraryDefinition*>(fn_lib);
}
void TF_LookUpOpDef(TF_FunctionLibraryDefinition* fn_lib, const char* name,
TF_Buffer* buf, TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
const tensorflow::OpDef* op_def_ptr = nullptr;
absl::Status s =
reinterpret_cast<tensorflow::FunctionLibraryDefinition*>(fn_lib)
->LookUpOpDef(name, &op_def_ptr);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return;
}
s = tensorflow::MessageToBuffer(*op_def_ptr, buf);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return;
}
} | #include "tensorflow/core/grappler/utils/grappler_test.h"
#include <memory>
#include "absl/algorithm/container.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace grappler {
namespace {
void CompareGraphNodes(protobuf::RepeatedPtrField<NodeDef>* want,
protobuf::RepeatedPtrField<NodeDef>* got) {
auto comparator = [](const NodeDef& n1, const NodeDef& n2) -> bool {
return n1.name() < n2.name();
};
std::sort(want->begin(), want->end(), comparator);
std::sort(got->begin(), got->end(), comparator);
ASSERT_EQ(want->size(), got->size());
for (int i = 0; i < want->size(); ++i) {
NodeDef& want_node = (*want)[i];
NodeDef& got_node = (*got)[i];
EXPECT_EQ(want_node.op(), got_node.op());
EXPECT_EQ(want_node.name(), got_node.name());
EXPECT_EQ(want_node.device(), got_node.device());
ASSERT_EQ(want_node.input_size(), got_node.input_size())
<< "want_node =\n"
<< want_node.DebugString() << "\ngot_node =\n"
<< got_node.DebugString();
const auto is_control = [](const string& input) -> bool {
return ParseTensorName(input).index() < 0;
};
auto want_inputs = want_node.mutable_input();
auto got_inputs = got_node.mutable_input();
std::sort(absl::c_find_if(*want_inputs, is_control), want_inputs->end());
std::sort(absl::c_find_if(*got_inputs, is_control), got_inputs->end());
for (int j = 0; j < want_node.input_size(); ++j) {
const TensorId want_tensor = ParseTensorName(want_node.input(j));
const TensorId got_tensor = ParseTensorName(got_node.input(j));
EXPECT_EQ(want_tensor.ToString(), got_tensor.ToString());
}
}
}
void SetAllOptimizers(RewriterConfig* cfg, RewriterConfig::Toggle value) {
cfg->set_arithmetic_optimization(value);
cfg->set_auto_mixed_precision(value);
cfg->set_auto_mixed_precision_onednn_bfloat16(value);
cfg->set_common_subgraph_elimination(value);
cfg->set_constant_folding(value);
cfg->set_debug_stripper(value);
cfg->set_dependency_optimization(value);
cfg->set_function_optimization(value);
cfg->set_implementation_selector(value);
cfg->set_layout_optimizer(value);
cfg->set_loop_optimization(value);
cfg->set_pin_to_host_optimization(value);
cfg->set_remapping(value);
cfg->set_scoped_allocator_optimization(value);
cfg->set_shape_optimization(value);
}
}
GrapplerTest::GrapplerTest() {
DisableAllOptimizers();
}
void GrapplerTest::DisableAllOptimizers() {
SetAllOptimizers(
options_.config.mutable_graph_options()->mutable_rewrite_options(),
RewriterConfig::OFF);
}
void GrapplerTest::EnableAllOptimizers() {
SetAllOptimizers(
options_.config.mutable_graph_options()->mutable_rewrite_options(),
RewriterConfig::ON);
}
std::vector<Tensor> GrapplerTest::EvaluateNodes(
const GraphDef& graph, const std::vector<string>& node_names) const {
return EvaluateNodes(graph, node_names, {});
}
std::vector<Tensor> GrapplerTest::EvaluateNodes(
const GraphDef& graph, const std::vector<string>& node_names,
const std::vector<std::pair<string, Tensor>>& inputs) const {
std::unique_ptr<tensorflow::Session> session(NewSession(options_));
TF_CHECK_OK(session->Create(graph));
RunOptions run_options;
std::vector<Tensor> output_tensors;
TF_CHECK_OK(session->Run(run_options, inputs, node_names, node_names,
&output_tensors, nullptr));
TF_CHECK_OK(session->Close());
return output_tensors;
}
std::vector<Tensor> GrapplerTest::EvaluateFetchNodes(
const GrapplerItem& item) const {
std::unique_ptr<tensorflow::Session> session(NewSession(options_));
TF_CHECK_OK(session->Create(item.graph));
RunOptions run_options;
if (!item.init_ops.empty()) {
std::vector<Tensor> dummy;
TF_CHECK_OK(
session->Run(run_options, {}, {}, item.init_ops, &dummy, nullptr));
}
std::vector<Tensor> output_tensors;
TF_CHECK_OK(session->Run(run_options, item.feed, item.fetch, {},
&output_tensors, nullptr));
TF_CHECK_OK(session->Close());
return output_tensors;
}
NodeDef* GrapplerTest::AddNode(
const string& name, const string& op, const std::vector<string>& inputs,
const std::vector<std::pair<string, AttrValue>>& attributes,
GraphDef* graph) const {
NodeDef* node = graph->add_node();
node->set_name(name);
node->set_op(op);
for (const string& input : inputs) {
node->add_input(input);
}
for (auto attr : attributes) {
(*node->mutable_attr())[attr.first] = attr.second;
}
return node;
}
void GrapplerTest::CompareGraphs(GraphDef want, GraphDef got) const {
CompareGraphNodes(want.mutable_node(), got.mutable_node());
}
void GrapplerTest::CompareFunctions(FunctionDef want, FunctionDef got) const {
CompareGraphNodes(want.mutable_node_def(), got.mutable_node_def());
}
void GrapplerTest::CompareNodes(const NodeDef& want, const NodeDef& got) const {
EXPECT_EQ(want.name(), got.name());
EXPECT_EQ(want.op(), got.op());
std::vector<string> want_inputs(want.input().begin(), want.input().end());
std::vector<string> got_inputs(got.input().begin(), got.input().end());
EXPECT_EQ(want_inputs, got_inputs);
const auto attr_name = [](const std::pair<const string, AttrValue>& attr) {
return attr.first;
};
std::vector<string> want_attrs;
std::vector<string> got_attrs;
absl::c_transform(want.attr(), std::back_inserter(want_attrs), attr_name);
absl::c_transform(got.attr(), std::back_inserter(got_attrs), attr_name);
absl::c_sort(want_attrs);
absl::c_sort(got_attrs);
EXPECT_EQ(want_attrs, got_attrs);
for (const string& attr : want_attrs) {
EXPECT_TRUE(AreAttrValuesEqual(want.attr().at(attr), got.attr().at(attr)));
}
}
bool GrapplerTest::IsNodesDirectlyConnected(const NodeMap& node_map,
const string& src,
const string& dst, int position) {
const NodeDef* src_node = node_map.GetNode(src);
const NodeDef* dst_node = node_map.GetNode(dst);
EXPECT_TRUE(src_node != nullptr) << src << " node not found";
EXPECT_TRUE(dst_node != nullptr) << dst << " node not found";
return src_node && dst_node && dst_node->input(position) == src_node->name();
}
int GrapplerTest::CountOpNodes(const GraphDef& graph, const string& op) {
return std::count_if(graph.node().begin(), graph.node().end(),
[&op](const NodeDef& node) { return node.op() == op; });
}
}
} |
1,250 | cpp | tensorflow/tensorflow | stream_executor | tensorflow/c/experimental/stream_executor/stream_executor.cc | third_party/xla/xla/stream_executor/stream_executor_test.cc | #include "absl/functional/any_invocable.h"
#include "absl/log/log.h"
#ifndef XLA_STREAM_EXECUTOR_STREAM_EXECUTOR_H_
#define XLA_STREAM_EXECUTOR_STREAM_EXECUTOR_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/stream_executor/allocator_stats.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/fft.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/module_spec.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
namespace stream_executor {
class StreamExecutor {
public:
virtual ~StreamExecutor() = default;
virtual const Platform* GetPlatform() const = 0;
virtual absl::Status Init() = 0;
virtual int device_ordinal() const { return -1; }
virtual absl::StatusOr<std::unique_ptr<Stream>> CreateStream(
std::optional<std::variant<StreamPriority, int>> priority =
std::nullopt) = 0;
virtual absl::StatusOr<std::unique_ptr<Event>> CreateEvent() = 0;
virtual const DeviceDescription& GetDeviceDescription() const = 0;
template <typename T>
DeviceMemory<T> AllocateArray(uint64_t element_count,
int64_t memory_space = 0);
template <typename T>
DeviceMemory<T> AllocateScalar() {
return AllocateArray<T>(1);
}
virtual absl::Status GetKernel(const MultiKernelLoaderSpec& spec,
Kernel* kernel) {
return absl::UnimplementedError("Not Implemented");
}
virtual bool UnloadModule(ModuleHandle module_handle) { return false; }
virtual absl::Status LoadModule(const MultiModuleLoaderSpec& spec,
ModuleHandle* module_handle) {
return absl::UnimplementedError("Not Implemented");
}
virtual absl::StatusOr<std::shared_ptr<DeviceMemoryBase>>
CreateOrShareConstant(Stream* stream, absl::Span<const uint8_t> content) {
return absl::UnimplementedError("Not Implemented");
}
virtual absl::Status Launch(Stream* stream, const ThreadDim& thread_dims,
const BlockDim& block_dims, const Kernel& k,
const KernelArgs& args) {
return absl::UnimplementedError("Not Implemented");
}
virtual absl::Status Launch(Stream* stream, const ThreadDim& thread_dims,
const BlockDim& block_dims,
const ClusterDim& cluster_dims, const Kernel& k,
const KernelArgs& args) {
return absl::UnimplementedError("Not Implemented");
}
virtual absl::Status Submit(Stream* stream,
const CommandBuffer& command_buffer) {
return absl::UnimplementedError("Not Implemented");
}
virtual void UnloadKernel(const Kernel* kernel) {}
virtual DeviceMemoryBase Allocate(uint64_t size, int64_t memory_space) = 0;
DeviceMemoryBase Allocate(uint64_t size) {
return Allocate(size, 0);
}
virtual void Deallocate(DeviceMemoryBase* mem) = 0;
virtual void* UnifiedMemoryAllocate(uint64_t size) { return nullptr; }
virtual void UnifiedMemoryDeallocate(void* mem) {}
virtual absl::StatusOr<void*> CollectiveMemoryAllocate(uint64_t size) {
return absl::UnimplementedError("Not implemented");
}
virtual absl::Status CollectiveMemoryDeallocate(void* mem) {
return absl::UnimplementedError("Not implemented");
}
virtual absl::StatusOr<std::unique_ptr<MemoryAllocation>> HostMemoryAllocate(
uint64_t size) = 0;
virtual void HostMemoryDeallocate(void* mem) = 0;
virtual bool SynchronizeAllActivity() = 0;
virtual absl::Status SynchronousMemZero(DeviceMemoryBase* location,
uint64_t size) = 0;
virtual absl::Status SynchronousMemcpy(DeviceMemoryBase* device_dst,
const void* host_src,
uint64_t size) = 0;
absl::Status SynchronousMemcpyH2D(const void* host_src, int64_t size,
DeviceMemoryBase* device_dst) {
return SynchronousMemcpy(device_dst, host_src, size);
}
virtual absl::Status SynchronousMemcpy(void* host_dst,
const DeviceMemoryBase& device_src,
uint64_t size) = 0;
absl::Status SynchronousMemcpyD2H(const DeviceMemoryBase& device_src,
int64_t size, void* host_dst) {
return SynchronousMemcpy(host_dst, device_src, size);
}
virtual absl::Status Memset(Stream* stream, DeviceMemoryBase* location,
uint8_t pattern, uint64_t size) {
return absl::InternalError("Not implemented");
}
virtual bool HostCallback(Stream* stream,
absl::AnyInvocable<absl::Status() &&> callback) = 0;
virtual void DeallocateStream(Stream* stream) = 0;
virtual absl::Status BlockHostUntilDone(Stream* stream) = 0;
virtual absl::Status EnablePeerAccessTo(StreamExecutor* other) = 0;
virtual bool CanEnablePeerAccessTo(StreamExecutor* other) = 0;
virtual bool DeviceMemoryUsage(int64_t* free, int64_t* total) const {
return false;
}
virtual absl::StatusOr<DeviceMemoryBase> GetSymbol(
const std::string& symbol_name, ModuleHandle module_handle) {
return absl::UnimplementedError("Not implemented");
}
virtual absl::StatusOr<std::unique_ptr<DeviceDescription>>
CreateDeviceDescription() const = 0;
virtual blas::BlasSupport* AsBlas() { return nullptr; }
virtual fft::FftSupport* AsFft() { return nullptr; }
virtual dnn::DnnSupport* AsDnn() { return nullptr; }
virtual absl::StatusOr<std::unique_ptr<Kernel>> CreateKernel() {
return absl::UnimplementedError("Kernels are not implemented");
}
virtual absl::StatusOr<std::unique_ptr<CommandBuffer>> CreateCommandBuffer(
CommandBuffer::Mode mode) {
return absl::UnimplementedError("Command buffers are not implemented");
}
virtual std::optional<AllocatorStats> GetAllocatorStats() {
return std::nullopt;
}
virtual bool ClearAllocatorStats() { return false; }
virtual absl::Status FlushCompilationCache() { return absl::OkStatus(); }
virtual Stream* FindAllocatedStream(void* device_stream) { return nullptr; }
virtual int64_t GetMemoryLimitBytes() const = 0;
struct GemmCallTrace {
enum class GemmType {
kPlain = 0,
kStridedBatched = 1,
kBatched = 2,
kBlasLt = 3
};
GemmType op;
int flags;
uint64_t size1, size2;
};
using ApiTrace = std::variant<GemmCallTrace>;
virtual absl::StatusOr<std::vector<ApiTrace>> ExtractApiTrace() {
return absl::UnimplementedError("Not implemented");
}
virtual absl::Status RecordApiTrace(ApiTrace call) {
return absl::UnimplementedError("Not implemented");
}
static constexpr uint64_t kLogGemm = 1 << 0;
virtual bool SetArgumentLoggingMode(uint64_t mode) { return false; }
};
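// Allocates a typed array on the device, returning a null DeviceMemory<T>
// when the request would exceed the executor's memory limit. A minimal usage
// sketch, assuming an already-initialized `executor`:
//   DeviceMemory<float> mem = executor->AllocateArray<float>(1024);
//   if (mem.is_null()) { /* allocation failed or exceeded the limit */ }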
template <typename T>
inline DeviceMemory<T> StreamExecutor::AllocateArray(uint64_t element_count,
int64_t memory_space) {
uint64_t bytes = sizeof(T) * element_count;
auto memory_limit_bytes = GetMemoryLimitBytes();
if (memory_limit_bytes > 0 &&
static_cast<int64_t>(bytes) > memory_limit_bytes) {
LOG(WARNING) << "Not enough memory to allocate " << bytes << " on device "
<< device_ordinal()
<< " within provided limit. limit=" << memory_limit_bytes
<< "]";
return DeviceMemory<T>();
}
return DeviceMemory<T>(Allocate(bytes, memory_space));
}
}
#endif
#include "tensorflow/c/experimental/stream_executor/stream_executor.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/c_api_macros_internal.h"
#include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h"
#include "tensorflow/c/tf_status_helper.h"
#include "xla/stream_executor/executor_cache.h"
#include "xla/stream_executor/host_memory_allocation.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_common.h"
#include "tensorflow/core/common_runtime/device/device_utils.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tsl/platform/status.h"
using tensorflow::StatusFromTF_Status;
namespace stream_executor {
using tensorflow::StringPiece;
using OwnedTFStatus = tensorflow::TF_StatusPtr;
namespace {
absl::Status ValidateSPPlatform(const SP_Platform& platform) {
TF_VALIDATE_STRUCT_SIZE(SP_Platform, platform, SP_PLATFORM_STRUCT_SIZE);
TF_VALIDATE_NOT_NULL(SP_Platform, platform, name);
TF_VALIDATE_NOT_NULL(SP_Platform, platform, type);
TF_RETURN_IF_ERROR(
tensorflow::device_utils::ValidateDeviceType(platform.name));
TF_RETURN_IF_ERROR(
tensorflow::device_utils::ValidateDeviceType(platform.type));
return absl::OkStatus();
}
absl::Status ValidateSPPlatformFns(const SP_PlatformFns& platform_fns) {
TF_VALIDATE_STRUCT_SIZE(SP_PlatformFns, platform_fns,
SP_PLATFORM_FNS_STRUCT_SIZE);
TF_VALIDATE_NOT_NULL(SP_PlatformFns, platform_fns, create_device);
TF_VALIDATE_NOT_NULL(SP_PlatformFns, platform_fns, destroy_device);
TF_VALIDATE_NOT_NULL(SP_PlatformFns, platform_fns, create_stream_executor);
TF_VALIDATE_NOT_NULL(SP_PlatformFns, platform_fns, destroy_stream_executor);
TF_VALIDATE_NOT_NULL(SP_PlatformFns, platform_fns, create_device_fns);
TF_VALIDATE_NOT_NULL(SP_PlatformFns, platform_fns, destroy_device_fns);
return absl::OkStatus();
}
absl::Status ValidateSPAllocatorStats(const SP_AllocatorStats& stats) {
TF_VALIDATE_STRUCT_SIZE(SP_AllocatorStats, stats,
SP_ALLOCATORSTATS_STRUCT_SIZE);
return absl::OkStatus();
}
absl::Status ValidateSPDeviceMemoryBase(const SP_DeviceMemoryBase& mem) {
TF_VALIDATE_STRUCT_SIZE(SP_DeviceMemoryBase, mem,
SP_DEVICE_MEMORY_BASE_STRUCT_SIZE);
return absl::OkStatus();
}
absl::Status ValidateSPDevice(const SP_Device& device) {
TF_VALIDATE_STRUCT_SIZE(SP_Device, device, SP_DEVICE_STRUCT_SIZE);
return absl::OkStatus();
}
absl::Status ValidateSPDeviceFns(const SP_DeviceFns& device_fns) {
TF_VALIDATE_STRUCT_SIZE(SP_DeviceFns, device_fns, SP_DEVICE_FNS_STRUCT_SIZE);
return absl::OkStatus();
}
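// Checks that every required SP_StreamExecutor callback is populated. The
// unified-memory hooks are mandatory only when the platform advertises
// unified-memory support.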
absl::Status ValidateSPStreamExecutor(const SP_StreamExecutor& se,
const SP_Platform& platform) {
TF_VALIDATE_STRUCT_SIZE(SP_StreamExecutor, se,
SP_STREAM_EXECUTOR_STRUCT_SIZE);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, allocate);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, deallocate);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, get_allocator_stats);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, host_memory_allocate);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, host_memory_deallocate);
if (platform.supports_unified_memory) {
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, unified_memory_allocate);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, unified_memory_deallocate);
}
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, device_memory_usage);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, create_stream);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, destroy_stream);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, create_stream_dependency);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, get_stream_status);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, create_event);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, destroy_event);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, get_event_status);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, record_event);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, wait_for_event);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, memcpy_dtoh);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, memcpy_htod);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, sync_memcpy_dtoh);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, sync_memcpy_htod);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, block_host_for_event);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, synchronize_all_activity);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, host_callback);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, mem_zero);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, memset);
TF_VALIDATE_NOT_NULL(SP_StreamExecutor, se, memset32);
return absl::OkStatus();
}
absl::Status ValidateSEPlatformRegistrationParams(
const SE_PlatformRegistrationParams& params) {
TF_VALIDATE_STRUCT_SIZE(SE_PlatformRegistrationParams, params,
SE_PLATFORM_REGISTRATION_PARAMS_STRUCT_SIZE);
TF_VALIDATE_NOT_NULL(SE_PlatformRegistrationParams, params, destroy_platform);
TF_VALIDATE_NOT_NULL(SE_PlatformRegistrationParams, params,
destroy_platform_fns);
return absl::OkStatus();
}
#undef TF_VALIDATE_NOT_NULL
DeviceMemoryBase DeviceMemoryBaseFromC(const SP_DeviceMemoryBase& mem) {
DeviceMemoryBase base(mem.opaque, mem.size);
base.SetPayload(mem.payload);
return base;
}
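// A C-style host callback cannot capture C++ state, so the AnyInvocable is
// heap-allocated and passed to the plugin through an opaque pointer; the
// trampoline below invokes it exactly once and then frees the context.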
struct HostCallbackContext {
absl::AnyInvocable<absl::Status() &&> callback;
};
void HostCallbackTrampoline(void* ctx, TF_Status* status) {
HostCallbackContext* host_ctx = static_cast<HostCallbackContext*>(ctx);
absl::Status s = std::move(host_ctx->callback)();
tsl::Set_TF_Status_from_Status(status, s);
delete host_ctx;
}
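// Adapts a plugin-provided SP_StreamExecutor (a plain C function table) to
// the C++ StreamExecutor interface, forwarding each virtual call to the
// corresponding C callback and translating TF_Status into absl::Status.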
class CStreamExecutor : public StreamExecutorCommon {
public:
explicit CStreamExecutor(Platform* se_platform, SP_Device device,
SP_DeviceFns* device_fns,
SP_StreamExecutor* stream_executor,
SP_Platform* platform, SP_PlatformFns* platform_fns,
SP_TimerFns* timer_fns, const std::string& name,
int visible_device_count)
: StreamExecutorCommon(se_platform),
device_(std::move(device)),
device_fns_(device_fns),
stream_executor_(stream_executor),
platform_(platform),
platform_fns_(platform_fns),
timer_fns_(timer_fns),
platform_name_(name),
visible_device_count_(visible_device_count) {}
~CStreamExecutor() override {
platform_fns_->destroy_device(platform_, &device_);
}
absl::Status Init() override { return absl::OkStatus(); }
DeviceMemoryBase Allocate(uint64 size, int64_t memory_space) override {
SP_DeviceMemoryBase mem = {SP_DEVICE_MEMORY_BASE_STRUCT_SIZE};
stream_executor_->allocate(&device_, size, memory_space, &mem);
absl::Status status = ValidateSPDeviceMemoryBase(mem);
if (!status.ok()) {
LOG(ERROR) << status.message();
}
return DeviceMemoryBaseFromC(mem);
}
DeviceMemoryBase Allocate(uint64 size) {
return Allocate(size, 0);
}
void Deallocate(DeviceMemoryBase* mem) override {
SP_DeviceMemoryBase device_memory_base = DeviceMemoryBaseToC(mem);
stream_executor_->deallocate(&device_, &device_memory_base);
}
absl::StatusOr<std::unique_ptr<MemoryAllocation>> HostMemoryAllocate(
uint64 size) override {
auto* buffer = stream_executor_->host_memory_allocate(&device_, size);
if (buffer == nullptr && size > 0) {
return absl::InternalError(
absl::StrFormat("Failed to allocate HostMemory of size %d", size));
}
return std::make_unique<HostMemoryAllocation>(buffer, size, this);
}
void HostMemoryDeallocate(void* mem) override {
stream_executor_->host_memory_deallocate(&device_, mem);
}
void* UnifiedMemoryAllocate(uint64 size) override {
CHECK(stream_executor_->unified_memory_allocate);
return stream_executor_->unified_memory_allocate(&device_, size);
}
void UnifiedMemoryDeallocate(void* mem) override {
CHECK(stream_executor_->unified_memory_deallocate);
stream_executor_->unified_memory_deallocate(&device_, mem);
}
absl::optional<AllocatorStats> GetAllocatorStats() override {
SP_AllocatorStats c_stats{SP_ALLOCATORSTATS_STRUCT_SIZE};
TF_Bool has_stats =
stream_executor_->get_allocator_stats(&device_, &c_stats);
if (!has_stats) {
return absl::nullopt;
}
absl::Status status = ValidateSPAllocatorStats(c_stats);
if (!status.ok()) {
LOG(ERROR) << status.message();
return absl::nullopt;
}
::stream_executor::AllocatorStats stats;
stats.num_allocs = c_stats.num_allocs;
stats.bytes_in_use = c_stats.bytes_in_use;
stats.peak_bytes_in_use = c_stats.peak_bytes_in_use;
stats.largest_alloc_size = c_stats.largest_alloc_size;
if (c_stats.has_bytes_limit) {
stats.bytes_limit = c_stats.bytes_limit;
}
stats.bytes_reserved = c_stats.bytes_reserved;
stats.peak_bytes_reserved = c_stats.peak_bytes_reserved;
if (c_stats.has_bytes_reservable_limit) {
stats.bytes_reservable_limit = c_stats.bytes_reservable_limit;
}
stats.largest_free_block_bytes = c_stats.largest_free_block_bytes;
return stats;
}
bool SynchronizeAllActivity() override {
OwnedTFStatus c_status(TF_NewStatus());
stream_executor_->synchronize_all_activity(&device_, c_status.get());
if (TF_GetCode(c_status.get()) != TF_OK) {
LOG(ERROR) << TF_Message(c_status.get());
return false;
}
return true;
}
absl::Status SynchronousMemZero(DeviceMemoryBase* location,
uint64 size) override {
return tsl::errors::Unimplemented(
"SynchronousMemZero is not supported by pluggable device.");
}
absl::Status SynchronousMemcpy(DeviceMemoryBase* gpu_dst,
const void* host_src, uint64 size) override {
OwnedTFStatus c_status(TF_NewStatus());
SP_DeviceMemoryBase device_memory_base = DeviceMemoryBaseToC(gpu_dst);
stream_executor_->sync_memcpy_htod(&device_, &device_memory_base, host_src,
size, c_status.get());
return StatusFromTF_Status(c_status.get());
}
absl::Status SynchronousMemcpy(void* host_dst,
const DeviceMemoryBase& gpu_src,
uint64 size) override {
OwnedTFStatus c_status(TF_NewStatus());
SP_DeviceMemoryBase device_memory_base = DeviceMemoryBaseToC(&gpu_src);
stream_executor_->sync_memcpy_dtoh(&device_, host_dst, &device_memory_base,
size, c_status.get());
return StatusFromTF_Status(c_status.get());
}
absl::Status Memset(Stream* stream, DeviceMemoryBase* location, uint8 pattern,
uint64 size) override {
OwnedTFStatus c_status(TF_NewStatus());
SP_Stream stream_handle = static_cast<CStream*>(stream)->Handle();
SP_DeviceMemoryBase device_mem = DeviceMemoryBaseToC(location);
stream_executor_->memset(&device_, stream_handle, &device_mem, pattern,
size, c_status.get());
return StatusFromTF_Status(c_status.get());
}
bool HostCallback(Stream* stream,
absl::AnyInvocable<absl::Status() &&> callback) override {
SP_Stream stream_handle = static_cast<CStream*>(stream)->Handle();
HostCallbackContext* ctx = new HostCallbackContext{std::move(callback)};
return stream_executor_->host_callback(&device_, stream_handle,
&HostCallbackTrampoline, ctx);
}
void DeallocateStream(Stream* stream) override {
static_cast<CStream*>(stream)->Destroy();
}
absl::Status BlockHostForEvent(Stream* stream, Event* event) {
OwnedTFStatus c_status(TF_NewStatus());
SP_Event event_handle = static_cast<CEvent*>(event)->Handle();
stream_executor_->block_host_for_event(&device_, event_handle,
c_status.get());
return StatusFromTF_Status(c_status.get());
}
absl::Status BlockHostUntilDone(Stream* stream) override {
OwnedTFStatus c_status(TF_NewStatus());
SP_Stream stream_handle = static_cast<CStream*>(stream)->Handle();
if (stream_executor_->block_host_until_done != nullptr) {
stream_executor_->block_host_until_done(&device_, stream_handle,
c_status.get());
return StatusFromTF_Status(c_status.get());
}
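    // Fallback for plugins that do not implement block_host_until_done:
    // emulate it by recording a fresh event on the stream and blocking the
    // host on that event.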
SP_Event event_handle;
stream_executor_->create_event(&device_, &event_handle, c_status.get());
TF_RETURN_IF_ERROR(StatusFromTF_Status(c_status.get()));
stream_executor_->record_event(&device_, stream_handle, event_handle,
c_status.get());
absl::Status s = StatusFromTF_Status(c_status.get());
if (!s.ok()) {
stream_executor_->destroy_event(&device_, event_handle);
return s;
}
stream_executor_->block_host_for_event(&device_, event_handle,
c_status.get());
stream_executor_->destroy_event(&device_, event_handle);
return StatusFromTF_Status(c_status.get());
}
absl::Status EnablePeerAccessTo(StreamExecutor* other) override {
return tsl::errors::Unimplemented(
"EnablePeerAccessTo is not supported by pluggable device.");
}
bool CanEnablePeerAccessTo(StreamExecutor* other) override { return false; }
bool DeviceMemoryUsage(int64_t* free, int64_t* total) const override {
return stream_executor_->device_memory_usage(
&device_, reinterpret_cast<int64_t*>(free),
reinterpret_cast<int64_t*>(total));
}
absl::StatusOr<std::unique_ptr<DeviceDescription>> CreateDeviceDescription()
const override {
OwnedTFStatus c_status(TF_NewStatus());
internal::DeviceDescriptionBuilder builder;
if (device_.hardware_name != nullptr) {
builder.set_name(device_.hardware_name);
}
if (device_.device_vendor != nullptr) {
builder.set_device_vendor(device_.device_vendor);
}
if (device_.pci_bus_id != nullptr) {
builder.set_pci_bus_id(device_.pci_bus_id);
}
if (device_fns_->get_numa_node != nullptr) {
int32_t numa_node = device_fns_->get_numa_node(&device_);
if (numa_node >= 0) {
builder.set_numa_node(numa_node);
}
}
if (device_fns_->get_memory_bandwidth != nullptr) {
int64_t memory_bandwidth = device_fns_->get_memory_bandwidth(&device_);
if (memory | #include "xla/stream_executor/stream_executor.h"
#include <memory>
#include "absl/status/statusor.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
static absl::StatusOr<std::unique_ptr<StreamExecutor>> NewStreamExecutor() {
StreamExecutorConfig config(0);
TF_ASSIGN_OR_RETURN(auto platform, PlatformManager::PlatformWithName("Host"));
TF_ASSIGN_OR_RETURN(auto stream_exec, platform->GetUncachedExecutor(config));
return stream_exec;
}
TEST(StreamExecutorTest, HostMemoryAllocate) {
TF_ASSERT_OK_AND_ASSIGN(auto executor, NewStreamExecutor());
TF_ASSERT_OK_AND_ASSIGN(auto allocation, executor->HostMemoryAllocate(1024));
EXPECT_NE(allocation->opaque(), nullptr);
EXPECT_EQ(allocation->size(), 1024);
}
} |
1,251 | cpp | tensorflow/tensorflow | tensor_pjrt_buffer_util | tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.cc | tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_NEXT_PLUGGABLE_DEVICE_TENSOR_PJRT_BUFFER_UTIL_H_
#define TENSORFLOW_C_EXPERIMENTAL_NEXT_PLUGGABLE_DEVICE_TENSOR_PJRT_BUFFER_UTIL_H_
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/pjrt_c_api_client.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
absl::StatusOr<PJRT_Buffer*> GetPjRtCBufferFromTensor(const Tensor* tensor);
absl::Status SetPjRtCBufferToTensor(PJRT_Buffer* c_buffer,
xla::PjRtCApiClient* c_api_client,
Tensor* tensor);
absl::StatusOr<xla::PjRtCApiClient*> GetPjRtCApiClient(
const DeviceType& device_type);
absl::Status ResetPjRtClient(const DeviceType& device_type);
}
#endif
#include "tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.h"
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/pjrt_c_api_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
absl::StatusOr<PJRT_Buffer*> GetPjRtCBufferFromTensor(const Tensor* tensor) {
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(tensor);
if (av_tensor == nullptr || av_tensor->GetBuffer() == nullptr) {
return absl::InternalError("Input tensor does not have PjRtBuffer.");
}
auto* c_api_buffer =
dynamic_cast<xla::PjRtCApiBuffer*>(av_tensor->GetBuffer().get());
if (c_api_buffer == nullptr) {
return absl::InternalError(
"The PjRtBuffer in the tensor is not type PjRtCApiBuffer.");
}
return c_api_buffer->c_buffer();
}
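// Wraps a C-API PJRT buffer back into a tensorflow::Tensor. If the tensor is
// not already an AsyncValueTensor, a new tensor backed by the buffer is
// created; otherwise the existing tensor's buffer is replaced in place.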
absl::Status SetPjRtCBufferToTensor(PJRT_Buffer* c_buffer,
xla::PjRtCApiClient* c_api_client,
Tensor* tensor) {
auto buffer = std::make_unique<xla::PjRtCApiBuffer>(c_api_client, c_buffer);
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(tensor);
if (av_tensor == nullptr) {
TF_ASSIGN_OR_RETURN(
*tensor, MakeTensorFromPjRtBuffer(tensor->dtype(), tensor->shape(),
std::move(buffer)));
} else {
av_tensor->SetBuffer(std::move(buffer));
}
return absl::OkStatus();
}
absl::StatusOr<xla::PjRtCApiClient*> GetPjRtCApiClient(
const DeviceType& device_type) {
TF_ASSIGN_OR_RETURN(absl::StatusOr<xla::PjRtClient*> pjrt_client,
tensorflow::GetPjRtClient(device_type));
auto* pjrt_c_api_client = dynamic_cast<xla::PjRtCApiClient*>(*pjrt_client);
if (pjrt_c_api_client == nullptr) {
return absl::InternalError(absl::StrCat("PjRtClient for ",
device_type.type_string(),
" is not type PjRtCApiClient"));
}
return pjrt_c_api_client;
}
absl::Status ResetPjRtClient(const DeviceType& device_type) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->Lookup(rmgr->default_container(),
kPjRtStateResourceName, &pjrt_state));
TF_RETURN_IF_ERROR(pjrt_state->MovePjRtClientToUnused(device_type));
return absl::OkStatus();
}
} | #include "tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_cpu.h"
#include "xla/pjrt/c/pjrt_c_api_wrapper_impl.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/pjrt/pjrt_api.h"
#include "xla/pjrt/pjrt_c_api_client.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using ::testing::HasSubstr;
using ::testing::NotNull;
using ::tsl::testing::StatusIs;
PJRT_Buffer* CreateCBuffer() {
auto status = pjrt::PjrtApi(DEVICE_CPU);
if (!status.ok()) {
CHECK_OK(pjrt::SetPjrtApi(DEVICE_CPU, GetPjrtApi()));
}
auto pjrt_client = xla::GetCApiClient(DEVICE_CPU);
CHECK_OK(pjrt_client.status());
auto c_api_client = down_cast<xla::PjRtCApiClient*>(pjrt_client->get());
std::vector<int32_t> data(1, 0);
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::S32, {1});
auto buffer = c_api_client->pjrt_c_client()->client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
c_api_client->pjrt_c_client()->client->addressable_devices()[0]);
CHECK_OK(buffer.status());
return new PJRT_Buffer{std::move(*buffer), c_api_client->pjrt_c_client()};
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCBufferFromTensorNoBuffer) {
auto allocator = std::make_unique<AsyncValueAllocator>();
tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
EXPECT_THAT(
GetPjRtCBufferFromTensor(&tensor),
StatusIs(error::INTERNAL, HasSubstr(absl::StrCat(
"Input tensor does not have PjRtBuffer"))));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCBufferFromTensorIncorrectType) {
auto allocator = std::make_unique<AsyncValueAllocator>();
tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client,
xla::GetTfrtCpuClient(true, 1));
std::vector<int32_t> data(1, 0);
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::S32, {1});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
pjrt_client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, pjrt_client->addressable_devices()[0]));
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(&tensor);
av_tensor->SetBuffer(std::move(buffer));
EXPECT_THAT(
GetPjRtCBufferFromTensor(&tensor),
StatusIs(
error::INTERNAL,
HasSubstr(absl::StrCat(
"The PjRtBuffer in the tensor is not type PjRtCApiBuffer"))));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCBufferFromTensorSuccess) {
auto allocator = std::make_unique<AsyncValueAllocator>();
tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
auto status = pjrt::PjrtApi(DEVICE_CPU);
if (!status.ok()) {
TF_ASSERT_OK(pjrt::SetPjrtApi(DEVICE_CPU, GetPjrtApi()));
}
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetCApiClient(DEVICE_CPU));
std::vector<int32_t> data(1, 0);
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::S32, {1});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
pjrt_client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, pjrt_client->addressable_devices()[0]));
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(&tensor);
av_tensor->SetBuffer(std::move(buffer));
TF_ASSERT_OK_AND_ASSIGN(auto c_buffer, GetPjRtCBufferFromTensor(&tensor));
EXPECT_THAT(c_buffer, NotNull());
}
TEST(TensorPjRtBufferUtilTest, SetPjRtCBufferToTensorNotAsyncValueTensor) {
tensorflow::Tensor tensor(DT_FLOAT, {1});
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetCApiClient(DEVICE_CPU));
PJRT_Buffer* c_buffer = CreateCBuffer();
TF_EXPECT_OK(SetPjRtCBufferToTensor(
c_buffer, down_cast<xla::PjRtCApiClient*>(pjrt_client.get()), &tensor));
}
TEST(TensorPjRtBufferUtilTest, SetPjRtCBufferToTensorSuccess) {
auto allocator = std::make_unique<AsyncValueAllocator>();
tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetCApiClient(DEVICE_CPU));
PJRT_Buffer* c_buffer = CreateCBuffer();
TF_EXPECT_OK(SetPjRtCBufferToTensor(
c_buffer, down_cast<xla::PjRtCApiClient*>(pjrt_client.get()), &tensor));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCApiClientNotFound) {
EXPECT_THAT(
GetPjRtCApiClient(tensorflow::DeviceType(DEVICE_CPU)),
StatusIs(error::NOT_FOUND,
HasSubstr(absl::StrCat("PjRt client not found for device type ",
DEVICE_CPU))));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCApiClientIncorrectType) {
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client,
xla::GetTfrtCpuClient(true, 1));
TF_ASSERT_OK(SetPjRtClientInTFGlobalResourceManager(DEVICE_CPU,
std::move(pjrt_client)));
EXPECT_THAT(GetPjRtCApiClient(tensorflow::DeviceType(DEVICE_CPU)),
StatusIs(error::INTERNAL,
HasSubstr(absl::StrCat("PjRtClient for ", DEVICE_CPU,
" is not type PjRtCApiClient"))));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCApiClientSuccess) {
auto status = pjrt::PjrtApi(DEVICE_CPU);
if (!status.ok()) {
TF_ASSERT_OK(pjrt::SetPjrtApi(DEVICE_CPU, GetPjrtApi()));
}
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetCApiClient(DEVICE_CPU));
TF_ASSERT_OK(SetPjRtClientInTFGlobalResourceManager(DEVICE_CPU,
std::move(pjrt_client)));
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client_get,
GetPjRtCApiClient(tensorflow::DeviceType(DEVICE_CPU)));
EXPECT_THAT(pjrt_client_get, NotNull());
}
}
} |
1,252 | cpp | tensorflow/tensorflow | saved_model_api | tensorflow/c/experimental/saved_model/internal/saved_model_api.cc | tensorflow/cc/saved_model/experimental/tests/saved_model_api_test.cc | #ifndef TENSORFLOW_CC_SAVED_MODEL_EXPERIMENTAL_PUBLIC_SAVED_MODEL_API_H_
#define TENSORFLOW_CC_SAVED_MODEL_EXPERIMENTAL_PUBLIC_SAVED_MODEL_API_H_
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include "tensorflow/c/experimental/saved_model/public/saved_model_api.h"
#include "tensorflow/cc/experimental/base/public/runtime.h"
#include "tensorflow/cc/experimental/base/public/status.h"
#include "tensorflow/cc/saved_model/experimental/public/concrete_function.h"
#include "tensorflow/cc/saved_model/experimental/public/concrete_function_list.h"
#include "tensorflow/cc/saved_model/experimental/public/signature_def_function.h"
namespace tensorflow {
namespace experimental {
namespace cc {
class SavedModelAPI {
public:
static std::unique_ptr<SavedModelAPI> Load(
const std::string& saved_model_path, const Runtime& runtime,
Status* status, const std::unordered_set<std::string>* tags = nullptr);
ConcreteFunction* GetConcreteFunction(const std::string& function_path,
Status* status);
SignatureDefFunction* GetSignatureDefFunction(
const std::string& function_path, Status* status);
SavedModelAPI(SavedModelAPI&&) = default;
SavedModelAPI& operator=(SavedModelAPI&&) = default;
private:
SavedModelAPI(const SavedModelAPI&) = delete;
SavedModelAPI& operator=(const SavedModelAPI&) = delete;
explicit SavedModelAPI(TF_SavedModel* model) : saved_model_(model) {}
struct TFSavedModelDeleter {
void operator()(TF_SavedModel* p) const { TF_DeleteSavedModel(p); }
};
std::unique_ptr<TF_SavedModel, TFSavedModelDeleter> saved_model_;
};
inline std::unique_ptr<SavedModelAPI> SavedModelAPI::Load(
const std::string& saved_model_path, const Runtime& runtime, Status* status,
const std::unordered_set<std::string>* tags) {
TF_SavedModel* saved_model = nullptr;
if (tags == nullptr) {
saved_model =
TF_LoadSavedModel(saved_model_path.c_str(), runtime.GetTFEContext(),
status->GetTFStatus());
} else {
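    // TF_LoadSavedModelWithTags takes a char** of tags; the pointers remain
    // valid for the duration of the call because they alias strings owned by
    // the caller's `tags` set.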
std::vector<const char*> tags_vector;
tags_vector.reserve(tags->size());
for (const std::string& tag : *tags) {
tags_vector.push_back(tag.c_str());
}
saved_model = TF_LoadSavedModelWithTags(
saved_model_path.c_str(), runtime.GetTFEContext(), tags_vector.data(),
tags_vector.size(), status->GetTFStatus());
}
if (!status->ok()) {
return nullptr;
}
return std::unique_ptr<SavedModelAPI>(new SavedModelAPI(saved_model));
}
inline ConcreteFunction* SavedModelAPI::GetConcreteFunction(
const std::string& function_path, Status* status) {
TF_ConcreteFunction* function = TF_GetSavedModelConcreteFunction(
saved_model_.get(), function_path.c_str(), status->GetTFStatus());
if (!status->ok()) {
return nullptr;
}
return ConcreteFunction::wrap(function);
}
inline SignatureDefFunction* SavedModelAPI::GetSignatureDefFunction(
const std::string& function_path, Status* status) {
TF_SignatureDefFunction* function = TF_GetSavedModelSignatureDefFunction(
saved_model_.get(), function_path.c_str(), status->GetTFStatus());
if (!status->ok()) {
return nullptr;
}
return SignatureDefFunction::wrap(function);
}
}
}
}
#endif
#include "tensorflow/c/experimental/saved_model/public/saved_model_api.h"
#include <memory>
#include <string>
#include <unordered_set>
#include "absl/types/optional.h"
#include "tensorflow/c/eager/tfe_context_internal.h"
#include "tensorflow/c/experimental/saved_model/core/saved_model_api.h"
#include "tensorflow/c/experimental/saved_model/core/tf_saved_model_api.h"
#include "tensorflow/c/experimental/saved_model/internal/concrete_function_list_type.h"
#include "tensorflow/c/experimental/saved_model/internal/concrete_function_type.h"
#include "tensorflow/c/experimental/saved_model/internal/saved_model_api_type.h"
#include "tensorflow/c/experimental/saved_model/internal/signature_def_function_type.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_internal.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/platform/casts.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
extern "C" {
TF_SavedModel* TF_LoadSavedModel(const char* dirname, TFE_Context* ctx,
TF_Status* status) {
std::string saved_model_dir(dirname);
std::unique_ptr<tensorflow::SavedModelAPI> result;
if (tensorflow::unwrap(ctx)->UsesTFRT()) {
status->status = tensorflow::errors::Unimplemented(
"TFRT SavedModel implementation will be added in the future");
} else {
std::unique_ptr<tensorflow::TFSavedModelAPI> saved_model;
status->status = tensorflow::TFSavedModelAPI::Load(
dirname, absl::nullopt,
tensorflow::down_cast<tensorflow::EagerContext*>(
tensorflow::unwrap(ctx)),
&saved_model);
result = std::move(saved_model);
}
if (!status->status.ok()) {
return nullptr;
}
return tensorflow::wrap(result.release());
}
TF_SavedModel* TF_LoadSavedModelWithTags(const char* dirname, TFE_Context* ctx,
const char* const* tags, int tags_len,
TF_Status* status) {
std::string saved_model_dir(dirname);
std::unordered_set<std::string> tagset;
for (int i = 0; i < tags_len; ++i) {
tagset.insert(std::string(tags[i]));
}
std::unique_ptr<tensorflow::SavedModelAPI> result;
if (tensorflow::unwrap(ctx)->UsesTFRT()) {
status->status = tensorflow::errors::Unimplemented(
"TFRT SavedModel implementation will be added in the future");
} else {
std::unique_ptr<tensorflow::TFSavedModelAPI> saved_model;
status->status = tensorflow::TFSavedModelAPI::Load(
dirname, tagset,
tensorflow::down_cast<tensorflow::EagerContext*>(
tensorflow::unwrap(ctx)),
&saved_model);
result = std::move(saved_model);
}
if (!status->status.ok()) {
return nullptr;
}
return tensorflow::wrap(result.release());
}
void TF_DeleteSavedModel(TF_SavedModel* model) {
delete tensorflow::unwrap(model);
}
TF_ConcreteFunction* TF_GetSavedModelConcreteFunction(TF_SavedModel* model,
const char* function_path,
TF_Status* status) {
tensorflow::ConcreteFunction* result = nullptr;
tensorflow::Status get_function_status =
tensorflow::unwrap(model)->GetFunction(function_path, &result);
status->status.Update(get_function_status);
if (!get_function_status.ok()) {
return nullptr;
}
return tensorflow::wrap(result);
}
TF_CAPI_EXPORT extern TF_SignatureDefFunction*
TF_GetSavedModelSignatureDefFunction(TF_SavedModel* model,
const char* signature_def_key,
TF_Status* status) {
tensorflow::SignatureDefFunction* result = nullptr;
tensorflow::Status get_function_status =
tensorflow::unwrap(model)->GetSignatureDefFunction(signature_def_key,
&result);
status->status.Update(get_function_status);
if (!get_function_status.ok()) {
return nullptr;
}
return tensorflow::wrap(result);
}
} | #include "tensorflow/cc/saved_model/experimental/public/saved_model_api.h"
#include <memory>
#include <string>
#include <unordered_set>
#include "tensorflow/cc/experimental/base/public/runtime.h"
#include "tensorflow/cc/experimental/base/public/runtime_builder.h"
#include "tensorflow/cc/experimental/base/public/status.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/test.h"
namespace {
using tensorflow::experimental::cc::Runtime;
using tensorflow::experimental::cc::RuntimeBuilder;
using tensorflow::experimental::cc::SavedModelAPI;
using tensorflow::experimental::cc::Status;
constexpr char kTestData[] = "cc/saved_model/testdata";
std::string SavedModelPath(tensorflow::StringPiece saved_model_dir) {
return tensorflow::io::JoinPath(tensorflow::testing::TensorFlowSrcRoot(),
kTestData, saved_model_dir);
}
class CPPSavedModelAPITest : public ::testing::TestWithParam<bool> {};
TEST_P(CPPSavedModelAPITest, LoadsSavedModelWithTags) {
Status status;
RuntimeBuilder builder;
bool use_tfrt = GetParam();
if (use_tfrt) {
GTEST_SKIP();
}
builder.SetUseTFRT(use_tfrt);
std::unique_ptr<Runtime> runtime = builder.Build(&status);
ASSERT_TRUE(status.ok()) << status.message();
std::string model_dir = SavedModelPath("VarsAndArithmeticObjectGraph");
std::unordered_set<std::string> tags = {"serve"};
std::unique_ptr<SavedModelAPI> model =
SavedModelAPI::Load(model_dir, *runtime, &status, &tags);
EXPECT_EQ(status.code(), TF_UNIMPLEMENTED);
}
TEST_P(CPPSavedModelAPITest, LoadsSavedModel) {
Status status;
RuntimeBuilder builder;
bool use_tfrt = GetParam();
if (use_tfrt) {
GTEST_SKIP();
}
builder.SetUseTFRT(use_tfrt);
std::unique_ptr<Runtime> runtime = builder.Build(&status);
ASSERT_TRUE(status.ok()) << status.message();
std::string model_dir = SavedModelPath("VarsAndArithmeticObjectGraph");
std::unique_ptr<SavedModelAPI> model =
SavedModelAPI::Load(model_dir, *runtime, &status);
EXPECT_EQ(status.code(), TF_OK) << status.message();
}
INSTANTIATE_TEST_SUITE_P(RuntimeAgnosticCPPSavedModelTests,
CPPSavedModelAPITest, ::testing::Bool());
} |
1,253 | cpp | tensorflow/tensorflow | tensor_spec | tensorflow/c/experimental/saved_model/core/tensor_spec.cc | tensorflow/cc/experimental/libtf/impl/tensor_spec_test.cc | #ifndef TENSORFLOW_CC_EXPERIMENTAL_LIBTF_IMPL_TENSOR_SPEC_H_
#define TENSORFLOW_CC_EXPERIMENTAL_LIBTF_IMPL_TENSOR_SPEC_H_
#include <iosfwd>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tf {
namespace libtf {
namespace impl {
struct TensorSpec {
tensorflow::PartialTensorShape shape;
tensorflow::DataType dtype;
bool operator==(const TensorSpec& o) const {
return dtype == o.dtype && shape.IsIdenticalTo(o.shape);
}
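  // Hash the shape via its DebugString, since PartialTensorShape has no
  // native absl hash support; shapes equal under IsIdenticalTo produce equal
  // debug strings, keeping the hash consistent with operator==.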
template <typename H>
friend H AbslHashValue(H h, const TensorSpec& t) {
return H::combine(std::move(h), t.shape.DebugString(), t.dtype);
}
};
std::ostream& operator<<(std::ostream& o, const TensorSpec& x);
}
}
}
#endif
#include "tensorflow/c/experimental/saved_model/core/tensor_spec.h"
#include <initializer_list>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
TensorSpec::TensorSpec()
: shape_(std::initializer_list<int64_t>()), dtype_(DT_FLOAT) {}
TensorSpec::TensorSpec(PartialTensorShape shape, DataType dtype)
: shape_(std::move(shape)), dtype_(dtype) {}
TensorSpec::TensorSpec(const TensorSpecProto& proto)
: shape_(proto.shape()), dtype_(proto.dtype()) {}
const PartialTensorShape& TensorSpec::shape() const { return shape_; }
DataType TensorSpec::dtype() const { return dtype_; }
} | #include "tensorflow/cc/experimental/libtf/impl/tensor_spec.h"
#include "absl/hash/hash_testing.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tf {
namespace libtf {
namespace impl {
TEST(TensorSpecTest, TestSupportsAbslHash) {
tensorflow::PartialTensorShape unknown_shape;
TensorSpec ts1;
ts1.shape = unknown_shape;
ts1.dtype = tensorflow::DT_FLOAT;
TensorSpec ts2;
ts2.shape = tensorflow::PartialTensorShape({2});
ts2.dtype = tensorflow::DT_FLOAT;
TensorSpec ts3;
ts3.shape = tensorflow::PartialTensorShape({1, 2});
ts3.dtype = tensorflow::DT_FLOAT;
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ts1, ts2, ts3}));
}
}
}
} |
1,254 | cpp | tensorflow/tensorflow | variable | tensorflow/c/experimental/saved_model/core/revived_types/variable.cc | tensorflow/cc/experimental/libtf/tests/variable_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_SAVED_MODEL_CORE_REVIVED_TYPES_VARIABLE_H_
#define TENSORFLOW_C_EXPERIMENTAL_SAVED_MODEL_CORE_REVIVED_TYPES_VARIABLE_H_
#include <memory>
#include "absl/types/optional.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/experimental/saved_model/core/revived_types/tensorhandle_convertible.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
namespace tensorflow {
class Variable : public TensorHandleConvertible {
public:
static Status CreateUninitialized(
ImmediateExecutionContext* ctx, DataType dtype, TensorShape shape,
absl::optional<std::string> name, const char* raw_device_name,
const std::vector<std::string>& component_devices,
std::unique_ptr<Variable>* output);
DataType dtype();
TensorShape shape();
Status Assign(ImmediateExecutionTensorHandle* handle);
Status ReadValue(ImmediateTensorHandlePtr* out);
Variable(Variable&& other) = default;
Variable& operator=(Variable&& other) = default;
~Variable() override;
private:
Variable(ImmediateExecutionContext* ctx, DataType dtype, TensorShape shape,
absl::optional<std::string> name, ImmediateTensorHandlePtr handle);
Variable(const Variable& variable) = delete;
Variable& operator=(const Variable&) = delete;
std::string name_;
DataType dtype_;
TensorShape shape_;
ImmediateExecutionContext* ctx_;
};
}
#endif
#include "tensorflow/c/experimental/saved_model/core/revived_types/variable.h"
#include <memory>
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/experimental/saved_model/core/ops/variable_ops.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
Variable::Variable(ImmediateExecutionContext* ctx, DataType dtype,
TensorShape shape, absl::optional<std::string> name,
ImmediateTensorHandlePtr handle)
: TensorHandleConvertible(std::move(handle)),
name_(name.has_value() ? *name : "Variable"),
dtype_(dtype),
shape_(shape),
ctx_(ctx) {}
Variable::~Variable() {
if (handle_ == nullptr) {
return;
}
Status status = internal::DestroyResource(ctx_, handle_.get());
if (!status.ok()) {
LOG(ERROR) << "Error destroying variable: " << name_
<< "due to: " << status;
}
}
DataType Variable::dtype() { return dtype_; }
TensorShape Variable::shape() { return shape_; }
Status Variable::Assign(ImmediateExecutionTensorHandle* handle) {
return internal::AssignVariable(ctx_, handle_.get(), dtype_, handle);
}
Status Variable::ReadValue(ImmediateTensorHandlePtr* out) {
return internal::ReadVariable(ctx_, handle_.get(), dtype_, out);
}
Status Variable::CreateUninitialized(
ImmediateExecutionContext* ctx, DataType dtype, TensorShape shape,
absl::optional<std::string> name, const char* raw_device_name,
const std::vector<std::string>& component_devices,
std::unique_ptr<Variable>* output) {
ImmediateTensorHandlePtr handle;
if (component_devices.empty()) {
TF_RETURN_IF_ERROR(internal::CreateUninitializedResourceVariable(
ctx, dtype, shape, raw_device_name, &handle));
output->reset(
new Variable(ctx, dtype, shape, std::move(name), std::move(handle)));
return Status();
}
if (!tensorflow::isa<EagerContext>(ctx)) {
return errors::InvalidArgument(
"Can only load distributed variables with EagerContext.");
}
EagerContext* eager_ctx = reinterpret_cast<EagerContext*>(ctx);
std::vector<TensorHandle*> handles;
for (const auto& device : component_devices) {
ImmediateTensorHandlePtr handlePtr;
TF_RETURN_IF_ERROR(internal::CreateUninitializedResourceVariable(
ctx, dtype, shape, device.empty() ? nullptr : device.c_str(),
&handlePtr));
if (!tensorflow::isa<TensorHandle>(handlePtr.get())) {
return errors::Internal("Returned replica handle has unsupported type.");
}
handles.push_back(reinterpret_cast<TensorHandle*>(handlePtr.release()));
}
TensorHandle* packed_handle;
TF_RETURN_IF_ERROR(TensorHandle::CreatePackedHandle(
std::move(handles), eager_ctx, &packed_handle));
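  // CreatePackedHandle took its own reference on each component, so drop the
  // references transferred out of `handlePtr.release()` above and let the
  // packed handle become the sole owner of its components.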
for (int i = 0; i != packed_handle->NumPackedHandles(); ++i) {
TensorHandle* component;
TF_RETURN_IF_ERROR(packed_handle->ExtractPackedHandle(i, &component));
component->Unref();
}
handle.reset(packed_handle);
output->reset(
new Variable(ctx, dtype, shape, std::move(name), std::move(handle)));
return Status();
}
} | #include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_function.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/graph_function.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/experimental/ops/resource_variable_ops.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/cc/experimental/libtf/function.h"
#include "tensorflow/cc/experimental/libtf/object.h"
#include "tensorflow/cc/experimental/libtf/value.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
namespace tf {
namespace libtf {
using tensorflow::AbstractContext;
using tensorflow::AbstractContextPtr;
using tensorflow::AbstractFunctionPtr;
using tensorflow::AbstractTensorHandle;
using tensorflow::DT_FLOAT;
using tensorflow::PartialTensorShape;
using tensorflow::Status;
using tensorflow::TF_StatusPtr;
class VariableTest
: public ::testing::TestWithParam<std::tuple<const char*, bool>> {
public:
template <class T, TF_DataType datatype>
impl::TaggedValueTensor CreateScalarTensor(T val) {
AbstractTensorHandle* raw = nullptr;
Status s = TestScalarTensorHandle<T, datatype>(ctx_.get(), val, &raw);
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
return impl::TaggedValueTensor(raw, false);
}
bool UseTfrt() { return std::get<1>(GetParam()); }
AbstractContextPtr ctx_;
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = tensorflow::StatusFromTF_Status(status.get());
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
AbstractContext* ctx_raw = nullptr;
s = BuildImmediateExecutionContext(UseTfrt(), &ctx_raw);
CHECK_EQ(tensorflow::errors::OK, s.code()) << s.message();
ctx_.reset(ctx_raw);
}
};
template <typename T>
void ExpectEquals(AbstractTensorHandle* t, T expected) {
TF_Tensor* result_t;
Status s = tensorflow::GetValue(t, &result_t);
ASSERT_TRUE(s.ok()) << s.message();
auto value = static_cast<T*>(TF_TensorData(result_t));
EXPECT_EQ(*value, expected);
TF_DeleteTensor(result_t);
}
TEST_P(VariableTest, CreateAssignReadDestroy) {
tensorflow::AbstractTensorHandlePtr var;
{
AbstractTensorHandle* var_ptr = nullptr;
PartialTensorShape scalar_shape;
TF_EXPECT_OK(
PartialTensorShape::MakePartialShape<int32_t>({}, 0, &scalar_shape));
TF_EXPECT_OK(tensorflow::ops::VarHandleOp(ctx_.get(), &var_ptr, DT_FLOAT,
scalar_shape));
var.reset(var_ptr);
}
auto x = CreateScalarTensor<float, TF_FLOAT>(2.0f);
TF_EXPECT_OK(
tensorflow::ops::AssignVariableOp(ctx_.get(), var.get(), x.get()));
tensorflow::AbstractTensorHandlePtr value;
{
AbstractTensorHandle* value_ptr = nullptr;
TF_EXPECT_OK(tensorflow::ops::ReadVariableOp(ctx_.get(), var.get(),
&value_ptr, DT_FLOAT));
value.reset(value_ptr);
}
ExpectEquals(value.get(), 2.0f);
TF_EXPECT_OK(tensorflow::ops::DestroyResourceOp(ctx_.get(), var.get()));
}
INSTANTIATE_TEST_SUITE_P(TF2CAPI, VariableTest,
::testing::Combine(::testing::Values("graphdef",
"mlir"),
::testing::Values(false)));
}
} |
1,255 | cpp | tensorflow/tensorflow | restore_ops | tensorflow/c/experimental/saved_model/core/ops/restore_ops.cc | tensorflow/c/experimental/saved_model/core/ops/restore_ops_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_SAVED_MODEL_CORE_OPS_RESTORE_OPS_H_
#define TENSORFLOW_C_EXPERIMENTAL_SAVED_MODEL_CORE_OPS_RESTORE_OPS_H_
#include <string>
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace internal {
Status SingleRestore(ImmediateExecutionContext* ctx, const std::string& prefix,
const std::string& checkpoint_key, DataType dtype,
ImmediateTensorHandlePtr* out);
}
}
#endif
#include "tensorflow/c/experimental/saved_model/core/ops/restore_ops.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_operation.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/tensor_interface.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace internal {
namespace {
Status CreateStringScalarTensorHandle(ImmediateExecutionContext* ctx,
const std::string& s,
ImmediateTensorHandlePtr* out) {
AbstractTensorPtr tensor(ctx->CreateStringScalar(s));
if (tensor.get() == nullptr) {
return errors::Internal(
"Failed to create scalar string tensor for checkpoint restore");
}
out->reset(ctx->CreateLocalHandle(tensor.get()));
return Status();
}
Status CreateStringVectorTensorHandle(ImmediateExecutionContext* ctx,
const std::string& s,
ImmediateTensorHandlePtr* out) {
int64_t flat_shape[] = {1};
AbstractTensorPtr tensor(ctx->CreateTensor(DT_STRING, flat_shape));
if (tensor.get() == nullptr) {
return errors::Internal(
"Failed to create vector string tensor for checkpoint restore");
}
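  // DT_STRING tensors store tstring objects, so construct one in place in the
  // tensor's uninitialized buffer.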
new (tensor->Data()) tstring(s);
out->reset(ctx->CreateLocalHandle(tensor.get()));
return Status();
}
}
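// Runs a single RestoreV2 op on the CPU. RestoreV2 takes three string inputs:
// a scalar checkpoint prefix, a vector of tensor names to restore, and a
// matching vector of shape-and-slice specs (empty here, meaning each tensor
// is restored whole), and it returns one tensor per requested name.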
Status SingleRestore(ImmediateExecutionContext* ctx, const std::string& prefix,
const std::string& checkpoint_key, DataType dtype,
ImmediateTensorHandlePtr* out) {
ImmediateOpPtr restore_op(ctx->CreateOperation());
TF_RETURN_IF_ERROR(restore_op->Reset("RestoreV2", "/cpu:0"));
TF_RETURN_IF_ERROR(restore_op->SetAttrTypeList("dtypes", &dtype, 1));
ImmediateTensorHandlePtr prefix_handle;
TF_RETURN_IF_ERROR(
CreateStringScalarTensorHandle(ctx, prefix, &prefix_handle));
ImmediateTensorHandlePtr names_handle;
TF_RETURN_IF_ERROR(
CreateStringVectorTensorHandle(ctx, checkpoint_key, &names_handle));
ImmediateTensorHandlePtr shapes_and_slices_handle;
TF_RETURN_IF_ERROR(
CreateStringVectorTensorHandle(ctx, "", &shapes_and_slices_handle));
TF_RETURN_IF_ERROR(restore_op->AddInput(prefix_handle.get()));
TF_RETURN_IF_ERROR(restore_op->AddInput(names_handle.get()));
TF_RETURN_IF_ERROR(restore_op->AddInput(shapes_and_slices_handle.get()));
AbstractTensorHandle* restored_handle = nullptr;
int num_retvals = 1;
TF_RETURN_IF_ERROR(restore_op->Execute(
absl::MakeSpan(&restored_handle, num_retvals), &num_retvals));
AbstractTensorHandlePtr owned_restored_handle(restored_handle);
if (!tensorflow::isa<ImmediateExecutionTensorHandle>(
owned_restored_handle.get())) {
return errors::Internal("Unexpected tensor handle kind.");
}
out->reset(reinterpret_cast<ImmediateExecutionTensorHandle*>(
owned_restored_handle.release()));
return Status();
}
}
} | #include "tensorflow/c/experimental/saved_model/core/ops/restore_ops.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/experimental/saved_model/core/test_utils.h"
#include "tensorflow/c/tensor_interface.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
std::string CheckpointPrefix(StringPiece saved_model_dir) {
return io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
saved_model_dir, kSavedModelVariablesDirectory,
kSavedModelVariablesFilename);
}
class RestoreOpsTest : public ::testing::Test {
public:
RestoreOpsTest()
: device_mgr_(testing::CreateTestingDeviceMgr()),
ctx_(testing::CreateTestingEagerContext(device_mgr_.get())) {}
EagerContext* context() { return ctx_.get(); }
private:
std::unique_ptr<StaticDeviceMgr> device_mgr_;
EagerContextPtr ctx_;
};
TEST_F(RestoreOpsTest, RestoreSuccessful) {
ImmediateTensorHandlePtr x_handle;
TF_EXPECT_OK(internal::SingleRestore(
context(), CheckpointPrefix("VarsAndArithmeticObjectGraph"),
"x/.ATTRIBUTES/VARIABLE_VALUE", DT_FLOAT, &x_handle));
AbstractTensorPtr x = testing::TensorHandleToTensor(x_handle.get());
EXPECT_EQ(x->Type(), DT_FLOAT);
EXPECT_EQ(x->NumElements(), 1);
EXPECT_EQ(x->NumDims(), 0);
EXPECT_FLOAT_EQ(*reinterpret_cast<float*>(x->Data()), 1.0f);
ImmediateTensorHandlePtr y_handle;
TF_EXPECT_OK(internal::SingleRestore(
context(), CheckpointPrefix("VarsAndArithmeticObjectGraph"),
"y/.ATTRIBUTES/VARIABLE_VALUE", DT_FLOAT, &y_handle));
AbstractTensorPtr y = testing::TensorHandleToTensor(y_handle.get());
EXPECT_EQ(y->Type(), DT_FLOAT);
EXPECT_EQ(y->NumElements(), 1);
EXPECT_EQ(y->NumDims(), 0);
EXPECT_FLOAT_EQ(*reinterpret_cast<float*>(y->Data()), 2.0f);
ImmediateTensorHandlePtr z_handle;
TF_EXPECT_OK(internal::SingleRestore(
context(), CheckpointPrefix("VarsAndArithmeticObjectGraph"),
"child/z/.ATTRIBUTES/VARIABLE_VALUE", DT_FLOAT, &z_handle));
AbstractTensorPtr z = testing::TensorHandleToTensor(z_handle.get());
EXPECT_EQ(z->Type(), DT_FLOAT);
EXPECT_EQ(z->NumElements(), 1);
EXPECT_EQ(z->NumDims(), 0);
EXPECT_FLOAT_EQ(*reinterpret_cast<float*>(z->Data()), 3.0f);
}
TEST_F(RestoreOpsTest, BadCheckpointPrefixShouldFail) {
ImmediateTensorHandlePtr x_handle;
Status status = internal::SingleRestore(
context(), CheckpointPrefix("unknown_bad_checkpoint_prefix"),
"x/.ATTRIBUTES/VARIABLE_VALUE", DT_FLOAT, &x_handle);
EXPECT_FALSE(status.ok()) << status.message();
}
TEST_F(RestoreOpsTest, BadCheckpointKeyShouldFail) {
ImmediateTensorHandlePtr x_handle;
Status status = internal::SingleRestore(
context(), CheckpointPrefix("VarsAndArithmeticObjectGraph"),
"bad_checkpoint_key", DT_FLOAT, &x_handle);
EXPECT_FALSE(status.ok()) << status.message();
}
}
} |
1,256 | cpp | tensorflow/tensorflow | array_ops | tensorflow/core/ops/array_ops.cc | tensorflow/core/ops/array_ops_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_OPS_ARRAY_OPS_H_
#define TENSORFLOW_C_EXPERIMENTAL_OPS_ARRAY_OPS_H_
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
namespace tensorflow {
namespace ops {
Status Identity(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle** output, const char* name = nullptr,
const char* raw_device_name = nullptr);
Status IdentityN(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> input,
absl::Span<AbstractTensorHandle*> output,
const char* name = nullptr,
const char* raw_device_name = nullptr);
Status ZerosLike(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name = nullptr,
const char* raw_device_name = nullptr);
Status Shape(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle** output, DataType out_type = DT_INT32,
const char* name = nullptr, const char* raw_device_name = nullptr);
Status ExpandDims(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle* const dim,
AbstractTensorHandle** output, const char* name = nullptr,
const char* raw_device_name = nullptr);
Status OnesLike(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name = nullptr,
const char* raw_device_name = nullptr);
}
}
#endif
#include <algorithm>
#include <ostream>
#include <vector>
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/kernel_shape_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/mirror_pad_mode.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/strided_slice_op.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
using shape_inference::UnchangedShape;
namespace {
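// Validates the Pack/Unpack "axis" attribute against the rank *after*
// packing and folds negative values into [0, rank_after_pack); e.g.
// axis = -1 with rank_after_pack = 3 normalizes to 2.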
Status GetAxisForPackAndUnpack(InferenceContext* c, int32_t rank_after_pack,
int32* axis) {
TF_RETURN_IF_ERROR(c->GetAttr("axis", axis));
if (*axis < -1 * rank_after_pack || *axis >= rank_after_pack) {
return errors::InvalidArgument("Invalid axis: ", *axis, "; must be in [",
-1 * rank_after_pack, ",", rank_after_pack,
")");
}
if (*axis < 0) *axis = (rank_after_pack + *axis);
return absl::OkStatus();
}
template <typename T>
std::vector<int64_t> AsInt64(const Tensor* tensor, int64_t num_elements) {
std::vector<int64_t> ret(num_elements);
auto data = tensor->vec<T>();
for (int64_t i = 0; i < num_elements; ++i) {
ret[i] = data(i);
}
return ret;
}
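// With a constant paddings tensor, each output dim is the input dim plus the
// before/after padding for that axis; negative paddings are rejected. For
// example, input [100,200,300] with paddings {{1,10},{2,20},{3,30}} yields
// [111,222,333] (mirrored in the Pad tests further down).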
template <typename T>
Status PadKnown(InferenceContext* c, ShapeHandle input,
const Tensor* paddings_t, int64_t num_dims) {
std::vector<DimensionHandle> dims(num_dims);
auto paddings_data = paddings_t->matrix<T>();
for (int64_t i = 0; i < num_dims; ++i) {
const T pad0 = paddings_data(i, 0);
const T pad1 = paddings_data(i, 1);
if (pad0 < 0 || pad1 < 0) {
return errors::InvalidArgument("Paddings must be non-negative");
}
TF_RETURN_IF_ERROR(c->Add(c->Dim(input, i), pad0 + pad1, &dims[i]));
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}
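// Shared shape function for the Pad family: "paddings" must be an [n, 2]
// matrix whose row count matches the input rank. If the paddings are not a
// compile-time constant, only the output rank (when known) can be inferred.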
Status PadShapeFn(InferenceContext* c) {
ShapeHandle paddings;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &paddings));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(paddings, 1), 2, &unused));
ShapeHandle input = c->input(0);
DimensionHandle n_dim = c->Dim(paddings, 0);
if (c->ValueKnown(n_dim)) {
TF_RETURN_IF_ERROR(c->WithRank(input, c->Value(n_dim), &input));
} else if (c->RankKnown(input)) {
TF_RETURN_IF_ERROR(c->WithValue(n_dim, c->Rank(input), &n_dim));
}
const Tensor* paddings_t = c->input_tensor(1);
if (paddings_t == nullptr) {
if (c->ValueKnown(n_dim)) {
c->set_output(0, c->UnknownShapeOfRank(c->Value(n_dim)));
} else {
c->set_output(0, c->UnknownShape());
}
return absl::OkStatus();
}
const int64_t num_dims = paddings_t->shape().dim_size(0);
TF_RETURN_IF_ERROR(c->WithRank(input, num_dims, &input));
TF_RETURN_IF_ERROR(c->WithValue(n_dim, num_dims, &n_dim));
if (paddings_t->dtype() == DT_INT32) {
return PadKnown<int32>(c, input, paddings_t, num_dims);
} else {
return PadKnown<int64_t>(c, input, paddings_t, num_dims);
}
}
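// Transpose: the rank is recovered from whichever of the input shape, the
// perm shape, or the perm constant is available; with a constant perm,
// output dim i is input dim perm[i]. The rank < 2 early-out exists because a
// one-element permutation cannot distinguish a scalar from a 1-d tensor, so
// the input shape is forwarded unchanged.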
Status TransposeShapeFn(InferenceContext* c) {
ShapeHandle input = c->input(0);
ShapeHandle perm_shape = c->input(1);
const Tensor* perm = c->input_tensor(1);
DimensionHandle perm_elems = c->NumElements(perm_shape);
if (!c->RankKnown(input) && !c->ValueKnown(perm_elems) && perm == nullptr) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
int64_t rank;
if (c->RankKnown(input)) {
rank = c->Rank(input);
} else if (c->ValueKnown(perm_elems)) {
rank = c->Value(perm_elems);
} else {
rank = perm->NumElements();
}
if (!c->RankKnown(input) && rank < 2) {
c->set_output(0, input);
return absl::OkStatus();
}
std::vector<DimensionHandle> dims;
dims.resize(rank);
TF_RETURN_IF_ERROR(c->WithRank(input, rank, &input));
TF_RETURN_IF_ERROR(c->WithRank(perm_shape, 1, &perm_shape));
TF_RETURN_IF_ERROR(c->WithValue(perm_elems, rank, &perm_elems));
if (perm != nullptr) {
std::vector<int64_t> data;
if (perm->dtype() == DT_INT32) {
data = AsInt64<int32>(perm, rank);
} else {
data = AsInt64<int64_t>(perm, rank);
}
for (int32_t i = 0; i < rank; ++i) {
int64_t in_idx = data[i];
if (in_idx >= rank || in_idx < -rank) {
return errors::InvalidArgument("perm dim ", in_idx,
" is out of range of input rank ", rank);
}
dims[i] = c->Dim(input, in_idx);
}
} else {
for (int i = 0; i < rank; ++i) {
dims[i] = c->UnknownDim();
}
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}
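// Reshape inference tolerates at most one unknown dimension per side: a
// single unknown output dim is filled in by exact division of the known
// input element count (and symmetrically for a single unknown input dim);
// if both sides have exactly one unknown dim and the known element counts
// agree, the two unknown dims are identified with each other.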
Status SetOutputShapeForReshape(InferenceContext* c) {
ShapeHandle in = c->input(0);
ShapeHandle out;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &out));
if (!c->RankKnown(out)) {
c->set_output(0, out);
return absl::OkStatus();
}
if (c->RankKnown(in)) {
bool too_many_unknown = false;
int32_t out_unknown_idx = -1;
DimensionHandle known_out_elems = c->NumElements(out);
if (!c->ValueKnown(known_out_elems)) {
known_out_elems = c->MakeDim(1);
for (int32_t i = 0; i < c->Rank(out); ++i) {
DimensionHandle dim = c->Dim(out, i);
if (!c->ValueKnown(dim)) {
if (out_unknown_idx >= 0) {
too_many_unknown = true;
break;
}
out_unknown_idx = i;
} else {
TF_RETURN_IF_ERROR(
c->Multiply(known_out_elems, dim, &known_out_elems));
}
}
}
int32_t in_unknown_idx = -1;
DimensionHandle known_in_elems = c->NumElements(in);
if (!c->ValueKnown(known_in_elems)) {
known_in_elems = c->MakeDim(1);
for (int32_t i = 0; i < c->Rank(in); ++i) {
DimensionHandle dim = c->Dim(in, i);
if (!c->ValueKnown(dim)) {
if (in_unknown_idx >= 0) {
too_many_unknown = true;
break;
}
in_unknown_idx = i;
} else {
TF_RETURN_IF_ERROR(c->Multiply(known_in_elems, dim, &known_in_elems));
}
}
}
if (!too_many_unknown) {
if (in_unknown_idx < 0 && out_unknown_idx < 0) {
if (c->Value(known_in_elems) != c->Value(known_out_elems)) {
return errors::InvalidArgument(
"Cannot reshape a tensor with ", c->DebugString(known_in_elems),
" elements to shape ", c->DebugString(out), " (",
c->DebugString(known_out_elems), " elements)");
}
} else if (in_unknown_idx < 0 && out_unknown_idx >= 0 &&
c->Value(known_out_elems) > 0) {
DimensionHandle inferred_dim;
TF_RETURN_IF_ERROR(c->Divide(known_in_elems, c->Value(known_out_elems),
/*evenly_divisible=*/true,
&inferred_dim));
TF_RETURN_IF_ERROR(
c->ReplaceDim(out, out_unknown_idx, inferred_dim, &out));
} else if (in_unknown_idx >= 0 && out_unknown_idx < 0 &&
c->Value(known_in_elems) != 0) {
DimensionHandle inferred_dim;
TF_RETURN_IF_ERROR(c->Divide(known_out_elems, c->Value(known_in_elems),
/*evenly_divisible=*/true,
&inferred_dim));
DimensionHandle unknown_in_dim = c->Dim(in, in_unknown_idx);
TF_RETURN_IF_ERROR(
c->Merge(unknown_in_dim, inferred_dim, &unknown_in_dim));
} else if (in_unknown_idx >= 0 && out_unknown_idx >= 0) {
if (c->Value(known_in_elems) == c->Value(known_out_elems)) {
DimensionHandle unknown_in_dim = c->Dim(in, in_unknown_idx);
TF_RETURN_IF_ERROR(
c->ReplaceDim(out, out_unknown_idx, unknown_in_dim, &out));
}
}
}
}
c->set_output(0, out);
return absl::OkStatus();
}
}
REGISTER_OP("ParallelConcat")
.Input("values: N * T")
.Output("output: T")
.Attr("N: int >= 1")
.Attr("T: type")
.Attr("shape: shape")
.SetShapeFn([](InferenceContext* c) {
PartialTensorShape shape;
TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape));
ShapeHandle passed_shape;
TF_RETURN_IF_ERROR(
c->MakeShapeFromPartialTensorShape(shape, &passed_shape));
if (!c->FullyDefined(passed_shape)) {
return errors::InvalidArgument("shape attr must be fully defined.");
}
ShapeHandle cur;
TF_RETURN_IF_ERROR(c->ReplaceDim(
passed_shape, 0, c->MakeDim(shape_inference::DimensionOrConstant(1)),
&cur));
for (int i = 0; i < c->num_inputs(); ++i) {
if (!c->FullyDefined(c->input(i))) {
return errors::InvalidArgument(
"All input shapes must be fully defined.");
}
if (c->Rank(c->input(i)) < 1) {
return errors::InvalidArgument(
"The rank of all input shapes must be greater than 0, "
"but input ",
i, " had rank ", c->Rank(c->input(i)), ".");
}
DimensionHandle unused;
if (!c->WithValue(c->Dim(c->input(i), 0), 1, &unused).ok()) {
return errors::InvalidArgument("Size of first dimension must be 1.");
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(c->Merge(c->input(i), cur, &cur),
"From merging shape ", i,
" with other shapes.");
}
c->set_output(0, passed_shape);
return absl::OkStatus();
});
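// Pack stacks N equal-shaped rank-R tensors into one rank-(R+1) tensor:
// the inputs are merged to a common shape and a new dimension of size N is
// inserted at "axis". Resource handle shape/type metadata is relaxed across
// inputs and dropped if the inputs disagree.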
REGISTER_OP("Pack")
.Input("values: N * T")
.Output("output: T")
.Attr("N: int >= 1")
.Attr("T: type")
.Attr("axis: int = 0")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle cur = c->input(c->num_inputs() - 1);
for (int i = c->num_inputs() - 2; i >= 0; --i) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(c->Merge(c->input(i), cur, &cur),
"From merging shape ", i,
" with other shapes.");
}
if (!c->RankKnown(cur)) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
int32_t rank = c->Rank(cur);
int32_t axis;
TF_RETURN_IF_ERROR(GetAxisForPackAndUnpack(c, rank + 1, &axis));
std::vector<DimensionHandle> dims;
int index = 0;
while (index < axis) dims.push_back(c->Dim(cur, index++));
dims.push_back(c->MakeDim(c->num_inputs()));
while (index < rank) dims.push_back(c->Dim(cur, index++));
c->set_output(0, c->MakeShape(dims));
for (int i = 0; i < c->num_inputs(); ++i) {
auto* shape_and_type = c->input_handle_shapes_and_types(i);
if (shape_and_type) {
if (!c->RelaxOutputHandleShapesAndMergeTypes(0, *shape_and_type)) {
c->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>({}));
break;
}
}
}
return absl::OkStatus();
});
REGISTER_OP("DeepCopy")
.Input("x: T")
.Output("y: T")
.Attr("T: type")
.SetIsStateful()
.SetShapeFn(UnchangedShape);
REGISTER_OP("InplaceUpdate")
.Input("x: T")
.Input("i: int32")
.Input("v: T")
.Output("y: T")
.Attr("T: type")
.SetShapeFn(UnchangedShape);
REGISTER_OP("InplaceAdd")
.Input("x: T")
.Input("i: int32")
.Input("v: T")
.Output("y: T")
.Attr("T: type")
.SetShapeFn(UnchangedShape);
REGISTER_OP("InplaceSub")
.Input("x: T")
.Input("i: int32")
.Input("v: T")
.Output("y: T")
.Attr("T: type")
.SetShapeFn(UnchangedShape);
REGISTER_OP("Empty")
.Input("shape: int32")
.Output("output: dtype")
.Attr("dtype: type")
.Attr("init: bool = false")
.SetDoNotOptimize()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle out;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &out));
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("Unpack")
.Input("value: T")
.Output("output: num * T")
.Attr("num: int >= 0")
.Attr("T: type")
.Attr("axis: int = 0")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle s = c->input(0);
ShapeHandle out;
if (c->RankKnown(s)) {
int32_t rank = c->Rank(s);
int32_t axis;
TF_RETURN_IF_ERROR(GetAxisForPackAndUnpack(c, rank, &axis));
DimensionHandle unused;
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(s, axis), c->num_outputs(), &unused));
std::vector<DimensionHandle> dims;
for (int i = 0; i < rank; ++i) {
if (i != axis) dims.push_back(c->Dim(s, i));
}
out = c->MakeShape(dims);
} else {
out = c->UnknownShape();
}
for (int i = 0; i < c->num_outputs(); ++i) c->set_output(i, out);
return absl::OkStatus();
});
REGISTER_OP("UnravelIndex")
.Input("indices: Tidx")
.Input("dims: Tidx")
.Output("output: Tidx")
.Attr("Tidx: {int32, int64} = DT_INT32")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle indices = c->input(0);
ShapeHandle dims;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &dims));
if (c->RankKnown(indices) && c->Rank(indices) == 0) {
c->set_output(0, c->Vector(c->Dim(dims, 0)));
} else if (c->RankKnown(indices)) {
c->set_output(0, c->Matrix(c->Dim(dims, 0), c->NumElements(indices)));
} else {
c->set_output(0, c->UnknownShape());
}
return absl::OkStatus();
});
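// BroadcastTo validates the input against the target shape right-aligned, in
// the usual broadcasting sense: an input dim with known size > 1 must merge
// with the corresponding output dim, while size-1 and unknown dims broadcast
// freely.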
REGISTER_OP("BroadcastTo")
.Input("input: T")
.Input("shape: Tidx")
.Output("output: T")
.Attr("T: type")
.Attr("Tidx: {int32, int64} = DT_INT32")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle shape_in = c->input(1);
TF_RETURN_IF_ERROR(c->WithRank(shape_in, 1, &shape_in));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &out));
if (!c->RankKnown(out)) {
c->set_output(0, out);
return absl::OkStatus();
}
ShapeHandle in = c->input(0);
if (!c->RankKnown(in)) {
c->set_output(0, out);
return absl::OkStatus();
}
int out_rank = c->Rank(out);
TF_RETURN_IF_ERROR(c->WithRankAtMost(in, out_rank, &in));
int in_rank = c->Rank(in);
for (int i = 0; i < in_rank; ++i) {
auto in_dim = c->Dim(in, in_rank - i - 1);
if (c->Value(in_dim) > 1) {
auto out_dim = c->Dim(out, out_rank - i - 1);
TF_RETURN_IF_ERROR(c->Merge(in_dim, out_dim, &out_dim));
TF_RETURN_IF_ERROR(
c->ReplaceDim(out, out_rank - i - 1, out_dim, &out));
}
}
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("Concat")
.Input("concat_dim: int32")
.Input("values: N * T")
.Output("output: T")
.Attr("N: int >= 2")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::ConcatShape(c, c->num_inputs() - 1);
});
REGISTER_OP("ConcatV2")
.Input("values: N * T")
.Input("axis: Tidx")
.Output("output: T")
.Attr("N: int >= 2")
.Attr("T: type")
.Attr("Tidx: {int32, int64} = DT_INT32")
.SetShapeFn(shape_inference::ConcatV2Shape);
#ifdef INTEL_MKL
REGISTER_OP("_MklConcatV2")
.Input("values: N * T")
.Input("axis: Tidx")
.Input("mkl_values: N * uint8")
.Input("mkl_axis: uint8")
.Output("output: T")
.Output("mkl_output: uint8")
.Attr("N: int >= 2")
.Attr("T: type")
.Attr("Tidx: {int32, int64} = DT_INT32")
.SetShapeFn(shape_inference::ConcatV2Shape)
.Doc(R"doc(
MKL version of ConcatV2 operator. Uses MKL DNN APIs to perform concatenation.
NOTE: Do not invoke this operator directly in Python. The graph rewrite pass is
expected to invoke these operators.
)doc");
#endif
REGISTER_OP("ConcatOffset")
.Input("concat_dim: int32")
.Input("shape: N * shape_type")
.Output("offset: N * shape_type")
.Attr("N: int >= 2")
.Attr("shape_type: {int32, int64} = DT_INT32")
.SetShapeFn([](InferenceContext* c) {
for (int i = 1; i < c->num_inputs(); ++i) {
c->set_output(i - 1, c->input(i));
}
return absl::OkStatus();
});
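// Split divides "value" evenly into num_split pieces along split_dim, so
// when the split dimension's size is known it must be exactly divisible by
// num_split.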
REGISTER_OP("Split")
.Input("split_dim: int32")
.Input("value: T")
.Output("output: num_split * T")
.Attr("num_split: int >= 1")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
DimensionHandle split_dimension;
ShapeHandle input = c->input(1);
TF_RETURN_IF_ERROR(c->MakeDimForScalarInputWithNegativeIndexing(
0, c->Rank(input), &split_dimension));
int num_split = c->num_outputs();
ShapeHandle out;
if (!c->ValueKnown(split_dimension)) {
if (c->RankKnown(input)) {
out = c->UnknownShapeOfRank(c->Rank(input));
} else {
out = c->UnknownShape();
}
} else {
int64_t split_dim = c->Value(split_dimension);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, split_dim + 1, &input));
DimensionHandle split_dim_size;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
c->Divide(c->Dim(input, split_dim), num_split,
/*evenly_divisible=*/true, &split_dim_size),
"Number of ways to split should evenly divide the split dimension");
TF_RETURN_IF_ERROR(
c->ReplaceDim(input, split_dim, split_dim_size, &out));
}
for (int i = 0; i < num_split; ++i) c->set_output(i, out);
return absl::OkStatus();
});
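// SplitV allows uneven splits given by size_splits. At most one entry may be
// -1, which absorbs the remainder of the split dimension: e.g. a dim of size
// 10 with size_splits = {2, -1, 3} infers the middle piece as 10 - (2+3) = 5.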
REGISTER_OP("SplitV")
.Input("value: T")
.Input("size_splits: Tlen")
.Input("split_dim: int32")
.Output("output: num_split * T")
.Attr("num_split: int >= 1")
.Attr("T: type")
.Attr("Tlen: {int8, int32, int64} = DT_INT64")
.SetShapeFn([](InferenceContext* c) {
DimensionHandle split_dimension;
ShapeHandle input = c->input(0);
TF_RETURN_IF_ERROR(c->MakeDimForScalarInputWithNegativeIndexing(
2, c->Rank(input), &split_dimension));
int32_t num_outputs = c->num_outputs();
int32_t rank = c->Rank(input);
ShapeHandle output_shape;
const Tensor* size_splits = c->input_tensor(1);
if (rank == InferenceContext::kUnknownRank) {
for (int i = 0; i < num_outputs; ++i) {
c->set_output(i, c->UnknownShape());
}
} else if (rank == 0) {
return errors::InvalidArgument("Can't split scalars");
} else if (size_splits == nullptr && c->ValueKnown(split_dimension)) {
output_shape = input;
for (int i = 0; i < num_outputs; ++i) {
TF_RETURN_IF_ERROR(c->ReplaceDim(output_shape,
c->Value(split_dimension),
c->UnknownDim(), &output_shape));
c->set_output(i, output_shape);
}
} else if (size_splits == nullptr && !c->ValueKnown(split_dimension)) {
for (int i = 0; i < num_outputs; ++i) {
c->set_output(i, c->UnknownShapeOfRank(rank));
}
} else {
int64_t split_dim = c->Value(split_dimension);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, split_dim + 1, &input));
std::vector<int64_t> data;
if (size_splits->dtype() == DT_INT32) {
data = AsInt64<int32>(size_splits, size_splits->shape().dim_size(0));
} else {
data =
AsInt64<int64_t>(size_splits, size_splits->shape().dim_size(0));
}
if (num_outputs != data.size()) {
return errors::InvalidArgument(
"Length of size_splits should be equal to num_outputs");
}
int64_t total_size = 0;
bool has_neg_one = false;
for (const auto size : data) {
if (size == -1) {
if (has_neg_one) {
return errors::InvalidArgument(
"size_splits can only have one -1");
}
has_neg_one = true;
} else {
total_size += size;
}
}
auto split_dim_size = c->Value(c->Dim(input, split_dim));
for (int i = 0; i < num_outputs; ++i) {
auto size = data[i];
if (data[i] == -1 && c->ValueKnown(split_dim_size)) {
size = split_dim_size - total_size;
}
if (size < -1 || (size == -1 && c->ValueKnown(split_dim_size))) {
return errors::InvalidArgument("Split size at index ", i,
" must be >= 0. Got: ", size);
}
TF_RETURN_IF_ERROR(
c->ReplaceDim(input, split_dim, c->MakeDim(size), &output_shape));
c->set_output(i, output_shape);
}
if (c->ValueKnown(split_dim_size)) {
if (has_neg_one ? total_size > split_dim_size
: total_size != split_dim_size) {
return errors::InvalidArgument(
"can't split axis of size ", split_dim_size,
" into pieces of size [", absl::StrJoin(data, ","), "]");
}
}
}
return absl::OkStatus();
});
REGISTER_OP("Const")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetShapeFn([](InferenceContext* c) {
const TensorProto* proto = nullptr;
TF_RETURN_IF_ERROR(c->GetAttr("value", &proto));
TF_RETURN_IF_ERROR(TensorShape::IsValidShape(proto->tensor_shape()));
TensorShape shape(proto->tensor_shape());
std::vector<DimensionHandle> dims;
dims.reserve(shape.dims());
for (int i = 0; i < shape.dims(); ++i) {
dims.push_back(c->MakeDim(shape.dim_size(i)));
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
});
REGISTER_OP("HostConst")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("_EagerConst")
.Input("input: T")
.Output("output: T")
.Attr("T: type")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("ImmutableConst")
.Attr("dtype: type")
.Attr("shape: shape")
.Attr("memory_region_name: string")
.Output("tensor: dtype")
.SetShapeFn(shape_inference::ExplicitShape);
REGISTER_OP("GuaranteeConst")
.Input("input: T")
.Output("output: T")
.Attr("T: type")
.SetShapeFn([](shape_inference::InferenceContext* c) {
return UnchangedShape(c);
})
.SetDoNotOptimize();
REGISTER_OP("ZerosLike")
.Input("x: T")
.Output("y: T")
.Attr("T: type")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("OnesLike")
.Input("x: T")
.Output("y: T")
.Attr(
"T: {bfloat16, half, float, double, int8, uint8, int16, uint16, int32, "
"uint32, int64, uint64, complex64, complex128, bool}")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Diag")
.Input("diagonal: T")
.Output("output: T")
.Attr(
"T: {bfloat16, half, float, double, int32, int64, complex64, "
"complex128}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle in = c->input(0);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(in, 1, &in));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(in, in, &out));
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("DiagPart")
.Input("input: T")
.Output("diagonal: T")
.Attr(
"T: {bfloat16, half, float, double, int32, int64, complex64, "
"complex128}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle in = c->input(0);
if (!c->RankKnown(in)) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
const int32_t rank = c->Rank(in);
if ((rank % 2) != 0 || rank <= 0) {
  return errors::InvalidArgument(
      "Input must have even and non-zero rank, input rank is ", rank);
}
const int32_t mid = rank / 2;
// Output dim i is the merge of input dims i and i + mid; mismatches such as
// [1,2,?,10] (2 vs 10) are rejected, matching the DiagPart tests below.
std::vector<DimensionHandle> dims(mid);
for (int i = 0; i < mid; ++i) {
  TF_RETURN_IF_ERROR(c->Merge(c->Dim(in, i), c->Dim(in, i + mid), &dims[i]));
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}); | #include "tensorflow/core/common_runtime/type_inference.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
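// Notation used by the INFER_OK / INFER_ERROR macros below (from
// shape_inference_testutil): input shapes are separated by ";", "?" denotes
// an unknown shape or dimension, "in0" means the output equals input 0, and
// "d0_1" refers to dimension 1 of input 0; "a|b" accepts either dimension.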
TEST(ArrayOpsTest, TensorScatterUpdate_ShapeFn) {
ShapeInferenceTestOp op("TensorScatterUpdate");
INFER_OK(op, "[4,3];[8,2];[8]", "in0");
INFER_OK(op, "[?,?];[?,2];[?]", "in0");
INFER_OK(op, "[?];[?];[?]", "in0");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op,
"[];[?,2];[?]");
INFER_ERROR("Indices and updates specified for empty input", op,
"[0,2,2];[8,2];[8]");
INFER_ERROR(
"Dimensions [0,1) of indices[shape=[8,2]] = [8] must match "
"dimensions [0,1) of updates[shape=[9]] = [9]",
op, "[?,?];[8,2];[9]");
INFER_ERROR(
"Dimensions [2,2) of input[shape=[?,?]] = [] must match "
"dimensions [1,2) of updates[shape=[?,1]] = [1]",
op, "[?,?];[?,2];[?,1]");
}
TEST(ArrayOpsTest, ScatterNd_ShapeFn) {
ShapeInferenceTestOp op("ScatterNd");
INFER_OK(op, "[8,2];[8];[2]", "[?,?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[?,2];[?];[]");
INFER_ERROR(
"Dimensions [0,1) of indices[shape=[8,2]] = [8] must match "
"dimensions [0,1) of updates[shape=[9]] = [9]",
op, "[8,2];[9];[?]");
}
TEST(ArrayOpsTest, UnravelIndex_ShapeFn) {
ShapeInferenceTestOp op("UnravelIndex");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[];[?]", "[d1_0]");
INFER_OK(op, "[4,5];[?]", "[d1_0,20]");
INFER_OK(op, "[2,3,4];[?]", "[d1_0,24]");
INFER_OK(op, "?;[?]", "?");
INFER_OK(op, "[?];[?]", "[d1_0,?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,1]");
}
TEST(ArrayOpsTest, Pack_ShapeFn) {
ShapeInferenceTestOp op("Pack");
auto set_axis = [&op](int axis) {
int n = 3;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "Pack")
.Input(src_list)
.Attr("N", n)
.Attr("axis", axis)
.Finalize(&op.node_def));
};
set_axis(0);
INFER_OK(op, "?;?;?", "?");
for (int axis : {0, -3}) {
set_axis(axis);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[1,3];[1,3];?", "[3,d0_0|d1_0,d0_1|d1_1]");
INFER_OK(op, "[?,3];[1,3];?", "[3,d1_0,d0_1|d1_1]");
INFER_OK(op, "[?,?];[1,3];?", "[3,d1_0,d1_1]");
}
for (int axis : {1, -2}) {
set_axis(axis);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[1,3];[1,3];?", "[d0_0|d1_0,3,d0_1|d1_1]");
INFER_OK(op, "[?,3];[1,3];?", "[d1_0,3,d0_1|d1_1]");
INFER_OK(op, "[?,?];[1,3];?", "[d1_0,3,d1_1]");
}
for (int axis : {2, -1}) {
set_axis(axis);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[1,3];[1,3];?", "[d0_0|d1_0,d0_1|d1_1,3]");
INFER_OK(op, "[?,3];[1,3];?", "[d1_0,d0_1|d1_1,3]");
INFER_OK(op, "[?,?];[1,3];?", "[d1_0,d1_1,3]");
}
set_axis(-4);
INFER_ERROR("Invalid axis: -4; must be in [-3,3)", op, "[1,3];[1,3];?");
set_axis(3);
INFER_ERROR("Invalid axis: 3; must be in [-3,3)", op, "[1,3];[1,3];?");
set_axis(0);
INFER_ERROR("Shapes must be equal rank, but are 3 and 2", op,
"[1,2,3];?;[1,4]");
INFER_ERROR("From merging shape 0 with other shapes.", op, "[1,2,3];?;[1,4]");
}
TEST(ArrayOpsTest, UnPack_ShapeFn) {
ShapeInferenceTestOp op("Unpack");
auto set_axis_and_num = [&op](int axis, int num) {
TF_ASSERT_OK(NodeDefBuilder("test", "Unpack")
.Input("a", 0, DT_FLOAT)
.Attr("axis", axis)
.Attr("num", num)
.Finalize(&op.node_def));
};
set_axis_and_num(0, 1);
INFER_OK(op, "?", "?");
for (int axis : {0, -3}) {
set_axis_and_num(axis, 1);
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,2,3]", "[d0_1,d0_2]");
INFER_OK(op, "[?,?,?]", "[d0_1,d0_2]");
}
for (int axis : {1, -2}) {
set_axis_and_num(axis, 2);
INFER_OK(op, "[1,2,3]", "[d0_0,d0_2];[d0_0,d0_2]");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_2];[d0_0,d0_2]");
}
for (int axis : {2, -1}) {
set_axis_and_num(axis, 3);
INFER_OK(op, "[1,2,3]", "[d0_0,d0_1];[d0_0,d0_1];[d0_0,d0_1]");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_1];[d0_0,d0_1];[d0_0,d0_1]");
}
set_axis_and_num(2, 2);
INFER_ERROR("Dimension must be 2 but is 3", op, "[1,2,3]");
set_axis_and_num(-4, 3);
INFER_ERROR("Invalid axis: -4; must be in [-3,3)", op, "[1,2,3]");
set_axis_and_num(3, 3);
INFER_ERROR("Invalid axis: 3; must be in [-3,3)", op, "[1,2,3]");
}
TEST(ArrayOpsTest, Const_ShapeFn) {
ShapeInferenceTestOp op("Const");
TensorProto tensor_proto;
auto* shape_proto = tensor_proto.mutable_tensor_shape();
auto rebuild_node_def = [&op, &tensor_proto]() {
TF_ASSERT_OK(NodeDefBuilder("test", "Const")
.Attr("value", tensor_proto)
.Finalize(&op.node_def));
};
TensorShape{}.AsProto(shape_proto);
rebuild_node_def();
INFER_OK(op, "", "[]");
TensorShape{1, 2, 3, 4}.AsProto(shape_proto);
rebuild_node_def();
INFER_OK(op, "", "[1,2,3,4]");
shape_proto->add_dim()->set_size(-1);
rebuild_node_def();
INFER_ERROR("Shape [1,2,3,4,?] is not fully defined", op, "");
}
TEST(ArrayOpsTest, UnchangedShapes_ShapeFn) {
for (const char* op_name : {
"CheckNumerics",
"Identity",
"RefIdentity",
"QuantizeAndDequantize",
"StopGradient",
"ZerosLike",
"OnesLike",
}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "in0");
INFER_OK(op, "[]", "in0");
INFER_OK(op, "[1,2,?,4,5]", "in0");
}
ShapeInferenceTestOp op("MatrixBandPart");
INFER_OK(op, "?;?;?", "in0");
INFER_OK(op, "[];?;?", "in0");
INFER_OK(op, "[1,2,?,4,5];?;?", "in0");
}
TEST(ArrayOpsTest, GuaranteeConst_ShapeFn) {
ShapeInferenceTestOp op("GuaranteeConst");
INFER_OK(op, "?", "in0");
INFER_OK(op, "[]", "in0");
INFER_OK(op, "[1,2,?,4,5]", "in0");
}
TEST(ArrayOpsTest, Identity_ShapeFnHandles) {
const char* op_name = "Identity";
ShapeInferenceTestOp op(op_name);
const OpRegistrationData* op_reg_data;
TF_ASSERT_OK(OpRegistry::Global()->LookUp(op.name, &op_reg_data));
std::vector<
std::unique_ptr<std::vector<std::pair<PartialTensorShape, DataType>>>>
handle_data;
handle_data.emplace_back(
new std::vector<std::pair<PartialTensorShape, DataType>>(
{{PartialTensorShape(), DT_BOOL}}));
shape_inference::InferenceContext c(
TF_GRAPH_DEF_VERSION, op.node_def, op_reg_data->op_def,
{PartialTensorShape()}, {}, {}, handle_data);
TF_ASSERT_OK(c.construction_status());
ASSERT_TRUE(op_reg_data->shape_inference_fn != nullptr);
TF_ASSERT_OK(c.Run(op_reg_data->shape_inference_fn));
const auto* shapes_and_types = c.output_handle_shapes_and_types(0);
ASSERT_TRUE(shapes_and_types != nullptr);
ASSERT_EQ(1, shapes_and_types->size());
EXPECT_EQ((*shapes_and_types)[0].dtype, DT_BOOL);
}
TEST(ArrayOpsTest, Diag_ShapeFn) {
ShapeInferenceTestOp op("Diag");
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,?,3]", "[d0_0,d0_1,d0_2,d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,1,2,3]", "[d0_0,d0_1,d0_2,d0_3,d0_0,d0_1,d0_2,d0_3]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
}
TEST(ArrayOpsTest, DiagPart_ShapeFn) {
ShapeInferenceTestOp op("DiagPart");
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,?,?,4]", "[d0_0,d0_3]");
INFER_OK(op, "[1,?,3,?,4,3]", "[d0_0,d0_4,d0_2|d0_5]");
INFER_OK(op, "[1,2,3,?,?,?,?,4]", "[d0_0,d0_1,d0_2,d0_7]");
INFER_ERROR("Input must have even and non-zero rank", op, "[]");
INFER_ERROR("Input must have even and non-zero rank", op, "[?]");
INFER_ERROR("Input must have even and non-zero rank", op, "[1,2,3]");
INFER_ERROR("Dimensions must be equal, but are 2 and 10", op, "[1,2,?,10]");
}
TEST(ArrayOpsTest, MatrixDiag_ShapeFn) {
ShapeInferenceTestOp op("MatrixDiag");
INFER_OK(op, "?", "?");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
INFER_OK(op, "[?]", "[d0_0,d0_0]");
INFER_OK(op, "[1,?,?,4]", "[d0_0,d0_1,d0_2,d0_3,d0_3]");
}
TEST(ArrayOpsTest, MatrixDiagPart_ShapeFn) {
ShapeInferenceTestOp op("MatrixDiagPart");
INFER_OK(op, "?", "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[?]");
INFER_OK(op, "[?,1,2,2]", "[d0_0,d0_1,d0_2|d0_3]");
INFER_OK(op, "[?,1,2,3]", "[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,1,3,2]", "[d0_0,d0_1,d0_3]");
}
TEST(ArrayOpsTest, Reverse_ShapeFn) {
ShapeInferenceTestOp op("Reverse");
INFER_OK(op, "?;?", "in0");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[?,2]");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];[4]");
INFER_ERROR("reverse does not work on tensors with more than 8 dimensions",
op, "[1,2,3,4,5,6,7,8,9];[9]");
INFER_OK(op, "[1,2,3,?];[4]", "in0");
INFER_OK(op, "[1,2,3,?,5,6,7,8];[8]", "in0");
}
TEST(ArrayOpsTest, ReverseV2_ShapeFn) {
ShapeInferenceTestOp op("ReverseV2");
INFER_OK(op, "?;?", "in0");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[?,2]");
INFER_OK(op, "[1,2,3];[2]", "in0");
INFER_ERROR("reverse does not work on tensors with more than 8 dimensions",
op, "[1,2,3,4,5,6,7,8,9];[9]");
INFER_OK(op, "[1,2,3,?];[4]", "in0");
INFER_OK(op, "[1,2,3,?,5,6,7,8];[8]", "in0");
}
TEST(ArrayOpsTest, Fill_ShapeFn) {
ShapeInferenceTestOp op("Fill");
AddNodeAttr("index_type", DT_INT32, &op.node_def);
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[?];?", "?");
INFER_OK(op, "[4];?", "[?,?,?,?]");
Tensor in_t = test::AsTensor<int32>({1, 2, 3, 4});
op.input_tensors[0] = &in_t;
INFER_OK(op, "[4];?", "[1,2,3,4]");
}
TEST(ArrayOpsTest, Gather_ShapeFn) {
ShapeInferenceTestOp op("Gather");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[1,?,2];[3]", "[d1_0,d0_1,d0_2]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[1,2,3]");
}
TEST(ArrayOpsTest, GatherV2_ShapeFn) {
ShapeInferenceTestOp op("GatherV2");
AddNodeAttr("batch_dims", 0, &op.node_def);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[1,2,3];[3];[]", "[?,?,?]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op,
"[];[1,2,3];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];[1,2,3];[1]");
Tensor axis_dim_t;
op.input_tensors.resize(3);
op.input_tensors[2] = &axis_dim_t;
axis_dim_t = test::AsScalar(1);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[1];[1,2];[]");
axis_dim_t = test::AsScalar(0);
INFER_OK(op, "[1,2,3];[];[]", "[d0_1,d0_2]");
axis_dim_t = test::AsScalar(1);
INFER_OK(op, "[1,2,3];[];[]", "[d0_0,d0_2]");
axis_dim_t = test::AsScalar(2);
INFER_OK(op, "[1,2,3];[];[]", "[d0_0,d0_1]");
axis_dim_t = test::AsScalar(0);
INFER_OK(op, "[1,2,3];[5];[]", "[d1_0,d0_1,d0_2]");
axis_dim_t = test::AsScalar(1);
INFER_OK(op, "[1,2,3];[5];[]", "[d0_0,d1_0,d0_2]");
axis_dim_t = test::AsScalar(2);
INFER_OK(op, "[1,2,3];[5];[]", "[d0_0,d0_1,d1_0]");
axis_dim_t = test::AsScalar(0);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d1_0,d1_1,d0_1,d0_2]");
axis_dim_t = test::AsScalar(1);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d0_0,d1_0,d1_1,d0_2]");
axis_dim_t = test::AsScalar(2);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d0_0,d0_1,d1_0,d1_1]");
axis_dim_t = test::AsScalar(-3);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d1_0,d1_1,d0_1,d0_2]");
axis_dim_t = test::AsScalar(-2);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d0_0,d1_0,d1_1,d0_2]");
axis_dim_t = test::AsScalar(-1);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d0_0,d0_1,d1_0,d1_1]");
ShapeInferenceTestOp batch_op("GatherV2");
AddNodeAttr("batch_dims", 1, &batch_op.node_def);
INFER_OK(batch_op, "[1,4800,8];[1,28400];[]", "[?,?,?]");
ShapeInferenceTestOp batch_op_2("GatherV2");
AddNodeAttr("batch_dims", 2, &batch_op_2.node_def);
INFER_OK(batch_op_2, "[1,2,3,4,5];[1,2,3];[]", "[?,?,?,?,?]");
}
TEST(ArrayOpsTest, GatherNd_ShapeFn) {
ShapeInferenceTestOp op("GatherNd");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[1,?,3,?];[?,0]", "[d1_0,d0_0,d0_1,d0_2,d0_3]");
INFER_OK(op, "[1,?,3,?];[?,4]", "[d1_0]");
INFER_ERROR("indices.shape[-1] must be <= params.rank", op, "[1,2,3];[4]");
}
TEST(ArrayOpsTest, Shape_ShapeFn) {
ShapeInferenceTestOp op("Shape");
AddNodeAttr("out_type", DT_INT32, &op.node_def);
INFER_OK(op, "?", "[?]");
INFER_OK(op, "[?]", "[1]");
INFER_OK(op, "[?,2,3,4,5]", "[5]");
}
static Status type_inference(Graph& graph) {
GraphOptimizationPassOptions opt_options;
std::unique_ptr<Graph> graph_ptr(new Graph(OpRegistry::Global()));
graph_ptr->Copy(graph);
opt_options.graph = &graph_ptr;
opt_options.flib_def = graph.mutable_flib_def();
TypeInferencePass pass;
return pass.Run(opt_options);
}
REGISTER_OP("ArrayOpsTest>ConstTypeCtor")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetTypeConstructor(full_type::Unary(TFT_TENSOR, "dtype"))
.SetShapeFn(shape_inference::UnknownShape);
TEST(ArrayOpsTest, Shape_TypeCtor) {
Graph graph(OpRegistry::Global());
Node* input_tensor_op;
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_tensor_op", "ArrayOpsTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_FLOAT)
.Finalize(&graph, &input_tensor_op));
Node* shape_op;
TF_EXPECT_OK(NodeBuilder("shape_op", "Shape")
.Input(input_tensor_op)
.Attr("T", DT_FLOAT)
.Attr("out_type", DT_INT32)
.Finalize(&graph, &shape_op));
TF_EXPECT_OK(type_inference(graph));
FullTypeDef expected_shape_op_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_SHAPE_TENSOR
args { type_id: TFT_INT32 }
})pb",
&expected_shape_op_t));
EXPECT_TRUE(full_type::IsEqual(shape_op->def().experimental_type(),
expected_shape_op_t))
<< "fulltype is\n"
<< shape_op->def().experimental_type().DebugString() << "\nexpected\n"
<< expected_shape_op_t.DebugString();
}
TEST(ArrayOpsTest, ShapeN_ShapeFn) {
ShapeInferenceTestOp op("ShapeN");
int n = 3;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "ShapeN")
.Input(src_list)
.Attr("N", n)
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?", "[?];[?];[?]");
INFER_OK(op, "[?];[?];[?]", "[1];[1];[1]");
INFER_OK(op, "[?,2,3,4,5];?;[1,?,3]", "[5];[?];[3]");
}
TEST(ArrayOpsTest, Unique_ShapeFn) {
ShapeInferenceTestOp op("Unique");
INFER_OK(op, "?", "[?];in0");
INFER_OK(op, "[5]", "[?];in0");
INFER_ERROR("Shape must be rank 1 but is rank 5", op, "[1,2,3,?,5]");
}
TEST(ArrayOpsTest, UniqueWithCounts_ShapeFn) {
ShapeInferenceTestOp op("UniqueWithCounts");
INFER_OK(op, "?", "[?];in0;[?]");
INFER_OK(op, "[1,2,3,?,5]", "[?];in0;[?]");
}
TEST(ArrayOpsTest, InvertPermutation_ShapeFn) {
ShapeInferenceTestOp op("InvertPermutation");
INFER_OK(op, "?", "[?]");
INFER_OK(op, "[1]", "in0");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
}
TEST(ArrayOpsTest, PadD_ShapeFn) {
for (const char* op_name : {"Pad", "MirrorPad"}) {
ShapeInferenceTestOp op(op_name);
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;[1,2,3]");
INFER_ERROR("Dimension must be 2 but is 4", op, "?;[1,4]");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];[4,2]");
INFER_OK(op, "[1,2,3];?", "[?,?,?]");
INFER_OK(op, "?;[3,2]", "[?,?,?]");
Tensor paddings_t(DT_INT64, TensorShape{3, 2});
test::FillValues<int64_t>(&paddings_t, {1, 10, 2, 20, 3, 30});
op.input_tensors[1] = &paddings_t;
INFER_OK(op, "[100,200,300];[3,2]", "[111,222,333]");
INFER_OK(op, "[100,?,300];[3,2]", "[111,?,333]");
INFER_OK(op, "?;[3,2]", "[?,?,?]");
INFER_OK(op, "?;?", "[?,?,?]");
}
}
TEST(ArrayOpsTest, PadV2_ShapeFn) {
ShapeInferenceTestOp op("PadV2");
op.input_tensors.resize(3);
INFER_OK(op, "?;?;?", "?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;[1,2,3];?");
INFER_ERROR("Dimension must be 2 but is 4", op, "?;[1,4];?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];[4,2];[]");
INFER_OK(op, "[1,2,3];?;[]", "[?,?,?]");
INFER_OK(op, "?;[3,2];[]", "[?,?,?]");
Tensor paddings_t(DT_INT64, TensorShape{3, 2});
test::FillValues<int64_t>(&paddings_t, {1, 10, 2, 20, 3, 30});
op.input_tensors[1] = &paddings_t;
INFER_OK(op, "[100,200,300];[3,2];[]", "[111,222,333]");
INFER_OK(op, "[100,?,300];[3,2];[]", "[111,?,333]");
INFER_OK(op, "?;[3,2];[]", "[?,?,?]");
INFER_OK(op, "?;?;[]", "[?,?,?]");
}
TEST(ArrayOpsTest, MirrorPadGrad_ShapeFn) {
ShapeInferenceTestOp op("MirrorPadGrad");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "?;[?,4]", "?");
INFER_ERROR("must be rank 3 but is rank 2", op, "[?,?];[3,2]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 3 and 2", op,
"[?,?,?];[3,3]");
INFER_OK(op, "[?,?,?];[3,2]", "[?,?,?]");
Tensor paddings_t(DT_INT64, TensorShape{3, 2});
test::FillValues<int64_t>(&paddings_t, {1, 10, 2, 20, 3, 30});
op.input_tensors[1] = &paddings_t;
INFER_OK(op, "[111,222,333];[3,2]", "[100,200,300]");
INFER_OK(op, "[111,?,333];[3,2]", "[100,?,300]");
}
TEST(ArrayOpsTest, BroadcastArgs_ShapeFn) {
ShapeInferenceTestOp op("BroadcastArgs");
INFER_OK(op, "?;?", "[?]");
INFER_OK(op, "[123];[1]", "[123]");
INFER_OK(op, "[1];[123]", "[123]");
INFER_OK(op, "[123];[121]", "[123]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
}
TEST(ArrayOpsTest, BroadcastTo_ShapeFn) {
ShapeInferenceTestOp op("BroadcastTo");
op.input_tensors.resize(2);
INFER_OK(op, "?;[?]", "?");
INFER_OK(op, "[];[1]", "[?]");
INFER_OK(op, "[1];[1]", "[?]");
INFER_OK(op, "[1];[2]", "[?,?]");
INFER_OK(op, "[2,2];[3]", "[?,d0_0,d0_1]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[?,?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2];[]");
INFER_ERROR("Shape must be at most rank 1 but is rank 2", op, "[2,2];[1]");
Tensor shape_t(DT_INT64, TensorShape{3});
test::FillValues<int64_t>(&shape_t, {2, 10, 3});
op.input_tensors[1] = &shape_t;
INFER_OK(op, "[1,?,1];[3]", "[2,10,3]");
INFER_OK(op, "[1,1,1];[3]", "[2,10,3]");
INFER_OK(op, "[10,1];[3]", "[2,d0_0,3]");
INFER_ERROR("Dimensions must be equal, but are 3 and 2 for", op,
"[3,1,1];[3]");
INFER_ERROR("Dimensions must be equal, but are 2 and 10 for", op,
"[2,2,1];[3]");
}
TEST(ArrayOpsTest, BroadcastGradientArgs_ShapeFn) {
ShapeInferenceTestOp op("BroadcastGradientArgs");
INFER_OK(op, "?;?", "[?];[?]");
INFER_OK(op, "[123];[456]", "[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
}
TEST(ArrayOpsTest, ListDiff_ShapeFn) {
ShapeInferenceTestOp op("BroadcastGradientArgs");
INFER_OK(op, "?;?", "[?];[?]");
INFER_OK(op, "[123];[456]", "[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
}
TEST(ArrayOpsTest, MatrixSetDiag_ShapeFn) {
ShapeInferenceTestOp op("MatrixSetDiag");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1];?");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "?;[]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[2,2];[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[2,2];[2,2]");
INFER_ERROR("Dimensions must be equal, but are 2 and 3", op, "[2,3];[3]");
INFER_OK(op, "?;?", "in0");
INFER_OK(op, "[1,2,2];[1,2]", "in0");
INFER_OK(op, "[1,2,3];?", "in0");
INFER_OK(op, "[1,3,2];?", "in0");
INFER_OK(op, "[1,?,2];[?,?]", "in0");
INFER_OK(op, "[1,?,?];[?,2]", "in0");
INFER_OK(op, "?;[1,2]", "[d1_0,?,?]");
INFER_OK(op, "[?,?,3];[1,2]", "[d1_0,d0_1,d0_2]");
INFER_OK(op, "[?,3,?];[1,2]", "[d1_0,d0_1,d0_2]");
INFER_OK(op, "[?,3,2];[1,2]", "[d1_0,d0_1,d0_2]");
}
TEST(ArrayOpsTest, ExpandDims_ShapeFn) {
ShapeInferenceTestOp op("ExpandDims");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
Tensor dim_t;
op.input_tensors[1] = &dim_t;
for (int32_t idx : {0, -4}) {
dim_t = test::AsScalar<int32>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[1,d0_0,d0_1,d0_2]");
}
for (int32_t idx : {1, -3}) {
dim_t = test::AsScalar<int32>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,1,d0_1,d0_2]");
dim_t = test::AsScalar<int64_t>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,1,d0_1,d0_2]");
}
for (int32_t idx : {2, -2}) {
dim_t = test::AsScalar<int32>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,d0_1,1,d0_2]");
dim_t = test::AsScalar<int64_t>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,d0_1,1,d0_2]");
}
for (int32_t idx : {3, -1}) {
dim_t = test::AsScalar<int32>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,d0_1,d0_2,1]");
dim_t = test::AsScalar<int64_t>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,d0_1,d0_2,1]");
}
for (int32_t idx : {4, -5}) {
dim_t = test::AsScalar<int32>(idx);
INFER_ERROR("not in the interval [-4, 3]", op, "[5,?,7];?");
dim_t = test::AsScalar<int64_t>(idx);
INFER_ERROR("not in the interval [-4, 3]", op, "[5,?,7];?");
}
std::vector<int32> dims;
dims.push_back(0);
dim_t = test::AsTensor<int32>(dims);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[1,d0_0,d0_1,d0_2]");
dims.push_back(1);
dim_t = test::AsTensor<int32>(dims);
INFER_ERROR("'dim' input must be a tensor with a single", op, "?;?");
INFER_ERROR("'dim' input must be a tensor with a single", op, "[5,6,7];?");
dim_t = test::AsScalar<int32>(0);
INFER_OK(op, "[2];[]", "[1,d0_0]");
dim_t = test::AsScalar<int32>(1);
INFER_OK(op, "[2];[]", "[d0_0,1]");
dim_t = test::AsScalar<int32>(-1);
INFER_OK(op, "[2];[]", "[d0_0,1]");
}
TEST(ArrayOpsTest, ImmutableConst_ShapeFn) {
ShapeInferenceTestOp op("ImmutableConst");
TF_ASSERT_OK(NodeDefBuilder("test", "ImmutableConst")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({1, 2, 3}))
.Attr("memory_region_name", "test_region")
.Finalize(&op.node_def));
INFER_OK(op, "", "[1,2,3]");
TF_ASSERT_OK(NodeDefBuilder("test", "ImmutableConst")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({}))
.Attr("memory_region_name", "test_region")
.Finalize(&op.node_def));
INFER_OK(op, "", "[]");
TF_ASSERT_OK(NodeDefBuilder("test", "ImmutableConst")
.Attr("dtype", DT_FLOAT)
.Attr("shape", "invalid")
.Attr("memory_region_name", "test_region")
.Finalize(&op.node_def));
INFER_ERROR("AttrValue had value with type 'string' when 'shape' expected",
op, "");
}
TEST(ArrayOpsTest, Concat_ShapeFn) {
ShapeInferenceTestOp op("Concat");
auto set_n = [&op](int n) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "Concat")
.Input({"concat_dim", 0, DT_INT32})
.Input(src_list)
.Attr("n", n)
.Finalize(&op.node_def));
};
set_n(2);
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];?;?");
set_n(7);
INFER_OK(op, "?;?;?;?;[1,2,3];?;[3,2,1];?", "[?,?,?]");
set_n(4);
INFER_OK(op, "?;?;?;[1,2,3,4];[4,3,2,1]", "[?,?,?,?]");
INFER_OK(op, "?;?;?;?;?", "?");
INFER_ERROR("Can't concatenate scalars (use tf.stack instead)", op,
"?;?;?;[];[]");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;?;?;[1,2];[1,2,3]");
Tensor concat_dim_t;
op.input_tensors.push_back(&concat_dim_t);
set_n(2);
for (int concat_dim : {0, -3}) {
concat_dim_t = test::AsScalar(concat_dim);
INFER_OK(op, "[];[100,2,?];[10,?,3]", "[110,d1_1,d2_2]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 5 and 3", op,
"[];[100,2,5];[10,?,3]");
INFER_OK(op, "[];[100,2,?];[?,?,3]", "[?,d1_1,d2_2]");
INFER_OK(op, "[];[?,2,?];[10,?,3]", "[?,d1_1,d2_2]");
}
for (bool use_negative : {false, true}) {
concat_dim_t = test::AsScalar(use_negative ? -2 : 1);
INFER_OK(op, "[];[1,100,?];[?,10,3]", "[d1_0,110,d2_2]");
concat_dim_t = test::AsScalar(use_negative ? -1 : 1);
INFER_OK(op, "[];[1,100];[?,10]", "[d1_0,110]");
INFER_OK(op, "[];[?,100];[1,10]", "[d2_0,110]");
concat_dim_t = test::AsScalar(use_negative ? -2 : 1);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[];[100];[10,?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[];[100,5];[10]");
}
concat_dim_t = test::AsScalar(-2);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[];[100];[10,?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[];[100,5];[10]");
set_n(5);
concat_dim_t = test::AsScalar(1);
INFER_OK(op, "[];?;[1,100,?];[?,?,?];[?,10,3];?", "[d2_0,?,d4_2]");
}
TEST(ArrayOpsTest, ConcatV2_ShapeFn) {
ShapeInferenceTestOp op("ConcatV2");
auto set_n = [&op](int n) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "ConcatV2")
.Input(src_list)
.Input({"axis", 0, DT_INT32})
.Attr("n", n)
.Finalize(&op.node_def));
};
set_n(2);
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[1]");
set_n(7);
INFER_OK(op, "?;?;?;?;[1,2,3];?;[3,2,1];?", "[?,?,?]");
set_n(4);
INFER_OK(op, "?;?;[1,2,3,4];[4,3,2,1];?", "[?,?,?,?]");
INFER_OK(op, "?;?;?;?;?", "?");
INFER_ERROR("Can't concatenate scalars (use tf.stack instead)", op,
"?;?;[];[];?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;?;[1,2];[1,2,3];?");
Tensor concat_dim_t;
op.input_tensors.resize(3);
op.input_tensors[2] = &concat_dim_t;
set_n(2);
concat_dim_t = test::AsScalar(0);
INFER_OK(op, "[100,2,?];[10,?,3];[]", "[110,d0_1,d1_2]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 5 and 3", op,
"[100,2,5];[10,?,3];[]");
INFER_OK(op, "[100,2,?];[?,?,3];[]", "[?,d0_1,d1_2]");
INFER_OK(op, "[?,2,?];[10,?,3];[]", "[?,d0_1,d1_2]"); |
1,257 | cpp | tensorflow/tensorflow | io_ops | tensorflow/core/ops/io_ops.cc | tensorflow/core/ops/io_ops_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_OPS_IO_OPS_H_
#define TENSORFLOW_C_EXPERIMENTAL_OPS_IO_OPS_H_
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
namespace tensorflow {
namespace ops {
Status RestoreV2(AbstractContext* ctx, AbstractTensorHandle* const prefix,
AbstractTensorHandle* const tensor_names,
AbstractTensorHandle* const shape_and_slices,
absl::Span<AbstractTensorHandle*> tensors,
absl::Span<DataType> dtypes, const char* name = nullptr,
const char* raw_device_name = nullptr);
Status SaveV2(AbstractContext* ctx, AbstractTensorHandle* const prefix,
AbstractTensorHandle* const tensor_names,
AbstractTensorHandle* const shape_and_slices,
absl::Span<AbstractTensorHandle* const> tensors,
const char* name = nullptr,
const char* raw_device_name = nullptr);
}
}
#endif
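// Note on the wrappers above: RestoreV2 writes one restored handle per
// requested tensor into "tensors", and "dtypes" must list the expected type
// of each restored tensor, mirroring the op's "dtypes" attr.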
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
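// Helpers shared by the reader ops below: V2 readers use scalar resource
// handles, while the legacy Ref(string) readers represent their handle as a
// 2-element string vector, which is what these shape functions encode.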
Status ScalarInputsAndOutputs(InferenceContext* c) {
ShapeHandle unused;
for (int i = 0; i < c->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 0, &unused));
}
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
Status TwoElementVectorAndScalarOutputs(InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_handle;
for (int i = 0; i < c->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle));
}
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
Status TwoElementOutput(InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
}
}
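// SaveV2 takes a scalar prefix plus parallel 1-d tensor_names and
// shape_and_slices vectors, each with exactly one entry per saved tensor
// (num_inputs() - 3).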
REGISTER_OP("SaveV2")
.Input("prefix: string")
.Input("tensor_names: string")
.Input("shape_and_slices: string")
.Input("tensors: dtypes")
.Attr("dtypes: list(type)")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
for (int i = 1; i <= 2; ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &s));
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(s, 0), c->num_inputs() - 3, &unused_dim));
}
return absl::OkStatus();
});
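// RestoreV2: when shape_and_slices is a constant, each non-empty entry is
// parsed into a full shape plus slice (see checkpoint::ParseShapeAndSlice)
// and the corresponding output takes the slice's shape; empty entries
// produce unknown output shapes.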
REGISTER_OP("RestoreV2")
.Input("prefix: string")
.Input("tensor_names: string")
.Input("shape_and_slices: string")
.Output("tensors: dtypes")
.Attr("dtypes: list(type)")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle shape0, shape1, shape2;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &shape0));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &shape1));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &shape2));
TF_RETURN_IF_ERROR(c->Merge(shape1, shape2, &shape0));
const Tensor* shape_and_slices_tensor = c->input_tensor(2);
if (shape_and_slices_tensor) {
if (shape_and_slices_tensor->dtype() != DT_STRING) {
return errors::InvalidArgument(
"Expected an input tensor of type string.");
}
const auto& shape_and_slices_flat =
shape_and_slices_tensor->flat<tstring>();
if (shape_and_slices_flat.size() != c->num_outputs()) {
return errors::InvalidArgument(
"The number of shape_and_slice doesn't match tensor outputs.");
}
for (int i = 0; i < shape_and_slices_flat.size(); ++i) {
const string& shape_and_slice = shape_and_slices_flat(i);
if (shape_and_slice.empty()) {
c->set_output(i, c->UnknownShape());
continue;
}
TensorShape parsed_full_shape;
TensorSlice parsed_slice;
TensorShape parsed_slice_shape;
TF_RETURN_IF_ERROR(checkpoint::ParseShapeAndSlice(
shape_and_slice, &parsed_full_shape, &parsed_slice,
&parsed_slice_shape));
ShapeHandle shape_handle;
TF_RETURN_IF_ERROR(
c->MakeShapeFromTensorShape(parsed_slice_shape, &shape_handle));
c->set_output(i, shape_handle);
}
return absl::OkStatus();
} else {
return UnknownShape(c);
}
});
REGISTER_OP("MergeV2Checkpoints")
.Input("checkpoint_prefixes: string")
.Input("destination_prefix: string")
.Attr("delete_old_dirs: bool = true")
.Attr("allow_missing_files: bool = false")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("Save")
.Input("filename: string")
.Input("tensor_names: string")
.Input("data: T")
.Attr("T: list(type)")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &s));
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(s, 0), c->num_inputs() - 2, &unused_dim));
return absl::OkStatus();
});
REGISTER_OP("SaveSlices")
.Input("filename: string")
.Input("tensor_names: string")
.Input("shapes_and_slices: string")
.Input("data: T")
.Attr("T: list(type)")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
for (int i = 1; i <= 2; ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &s));
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(s, 0), c->num_inputs() - 3, &unused_dim));
}
return absl::OkStatus();
});
REGISTER_OP("Restore")
.Input("file_pattern: string")
.Input("tensor_name: string")
.Output("tensor: dt")
.Attr("dt: type")
.Attr("preferred_shard: int = -1")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
});
REGISTER_OP("RestoreSlice")
.Input("file_pattern: string")
.Input("tensor_name: string")
.Input("shape_and_slice: string")
.Output("tensor: dt")
.Attr("dt: type")
.Attr("preferred_shard: int = -1")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
const Tensor* shape_and_slices_tensor = c->input_tensor(2);
if (shape_and_slices_tensor) {
const auto& shape_and_slice =
shape_and_slices_tensor->flat<tstring>()(0);
if (shape_and_slice.empty()) {
c->set_output(0, c->UnknownShape());
} else {
TensorShape parsed_full_shape;
TensorSlice parsed_slice;
TensorShape parsed_slice_shape;
TF_RETURN_IF_ERROR(checkpoint::ParseShapeAndSlice(
shape_and_slice, &parsed_full_shape, &parsed_slice,
&parsed_slice_shape));
ShapeHandle shape_handle;
TF_RETURN_IF_ERROR(
c->MakeShapeFromTensorShape(parsed_slice_shape, &shape_handle));
c->set_output(0, shape_handle);
}
} else {
c->set_output(0, c->UnknownShape());
}
return absl::OkStatus();
});
REGISTER_OP("ShardedFilename")
.Input("basename: string")
.Input("shard: int32")
.Input("num_shards: int32")
.Output("filename: string")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("ShardedFilespec")
.Input("basename: string")
.Input("num_shards: int32")
.Output("filename: string")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("WholeFileReader")
.Output("reader_handle: Ref(string)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("WholeFileReaderV2")
.Output("reader_handle: resource")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("TextLineReader")
.Output("reader_handle: Ref(string)")
.Attr("skip_header_lines: int = 0")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput)
.Deprecated(26, "Use TextLineReaderV2");
REGISTER_OP("TextLineReaderV2")
.Output("reader_handle: resource")
.Attr("skip_header_lines: int = 0")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("FixedLengthRecordReader")
.Output("reader_handle: Ref(string)")
.Attr("header_bytes: int = 0")
.Attr("record_bytes: int")
.Attr("footer_bytes: int = 0")
.Attr("hop_bytes: int = 0")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput)
.Deprecated(26, "Use FixedLengthRecordReaderV2");
REGISTER_OP("FixedLengthRecordReaderV2")
.Output("reader_handle: resource")
.Attr("header_bytes: int = 0")
.Attr("record_bytes: int")
.Attr("footer_bytes: int = 0")
.Attr("hop_bytes: int = 0")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("encoding: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("TFRecordReader")
.Output("reader_handle: Ref(string)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("compression_type: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput)
.Deprecated(26, "Use TFRecordReaderV2");
REGISTER_OP("TFRecordReaderV2")
.Output("reader_handle: resource")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("compression_type: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("LMDBReader")
.Output("reader_handle: Ref(string)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("IdentityReader")
.Output("reader_handle: Ref(string)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput)
.Deprecated(26, "Use IdentityReaderV2");
REGISTER_OP("IdentityReaderV2")
.Output("reader_handle: resource")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("ReaderRead")
.Input("reader_handle: Ref(string)")
.Input("queue_handle: Ref(string)")
.Output("key: string")
.Output("value: string")
.SetShapeFn(TwoElementVectorAndScalarOutputs);
REGISTER_OP("ReaderReadV2")
.Input("reader_handle: resource")
.Input("queue_handle: resource")
.Output("key: string")
.Output("value: string")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("ReaderReadUpTo")
.Input("reader_handle: Ref(string)")
.Input("queue_handle: Ref(string)")
.Input("num_records: int64")
.Output("keys: string")
.Output("values: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
ShapeHandle out = c->Vector(InferenceContext::kUnknownDim);
c->set_output(0, out);
c->set_output(1, out);
return absl::OkStatus();
});
REGISTER_OP("ReaderReadUpToV2")
.Input("reader_handle: resource")
.Input("queue_handle: resource")
.Input("num_records: int64")
.Output("keys: string")
.Output("values: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
ShapeHandle out = c->Vector(InferenceContext::kUnknownDim);
c->set_output(0, out);
c->set_output(1, out);
return absl::OkStatus();
});
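// Both ReadUpTo variants may return fewer than num_records records, so the
// keys/values outputs are vectors of statically unknown length.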
REGISTER_OP("ReaderNumRecordsProduced")
.Input("reader_handle: Ref(string)")
.Output("records_produced: int64")
.SetShapeFn(TwoElementVectorAndScalarOutputs);
REGISTER_OP("ReaderNumRecordsProducedV2")
.Input("reader_handle: resource")
.Output("records_produced: int64")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("ReaderNumWorkUnitsCompleted")
.Input("reader_handle: Ref(string)")
.Output("units_completed: int64")
.SetShapeFn(TwoElementVectorAndScalarOutputs);
REGISTER_OP("ReaderNumWorkUnitsCompletedV2")
.Input("reader_handle: resource")
.Output("units_completed: int64")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("ReaderSerializeState")
.Input("reader_handle: Ref(string)")
.Output("state: string")
.SetShapeFn(TwoElementVectorAndScalarOutputs);
REGISTER_OP("ReaderSerializeStateV2")
.Input("reader_handle: resource")
.Output("state: string")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("ReaderRestoreState")
.Input("reader_handle: Ref(string)")
.Input("state: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
DimensionHandle unused_handle;
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(c->input(0), 0), 2, &unused_handle));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("ReaderRestoreStateV2")
.Input("reader_handle: resource")
.Input("state: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("ReaderReset")
.Input("reader_handle: Ref(string)")
.SetShapeFn(TwoElementVectorAndScalarOutputs);
REGISTER_OP("ReaderResetV2")
.Input("reader_handle: resource")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("ReadFile")
.Input("filename: string")
.Output("contents: string")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("WriteFile")
.Input("filename: string")
.Input("contents: string")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("MatchingFiles")
.Input("pattern: string")
.Output("filenames: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(0), 1, &unused));
c->set_output(0, c->Vector(InferenceContext::kUnknownDim));
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(IoOpsTest, Save_ShapeFn) {
ShapeInferenceTestOp op("Save");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"a", 0, DT_STRING})
.Input({"b", 0, DT_STRING})
.Input({{"c", 0, DT_FLOAT}, {"d", 0, DT_INT64}})
.Attr("T", {DT_FLOAT, DT_INT64})
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?;?", "");
INFER_OK(op, "[];[2];?;?", "");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[2,3];?;?");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[3];?;?");
}
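// Shape notation used throughout these tests: the ';'-separated string gives
// one shape per input, where "?" is an unknown shape, "[]" a scalar, and
// "[2]" a length-2 vector; the final argument is the expected output shapes
// (empty for ops with no outputs).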
TEST(IoOpsTest, SaveSlices_ShapeFn) {
ShapeInferenceTestOp op("SaveSlices");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"a", 0, DT_STRING})
.Input({"b", 0, DT_STRING})
.Input({"c", 0, DT_STRING})
.Input({{"d", 0, DT_FLOAT}, {"e", 0, DT_INT64}})
.Attr("T", {DT_FLOAT, DT_INT64})
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?;?;?", "");
INFER_OK(op, "[];[2];[2];?;?", "");
INFER_OK(op, "[];[2];[2];[100,200,300];[4,5]", "");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[2,3];?;?;?");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[2];[2,3];?;?");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[2];[3];?;?");
}
TEST(IoOpsTest, Restore_ShapeFn) {
ShapeInferenceTestOp op("Restore");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[];[]", "?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?]");
}
TEST(IoOpsTest, RestoreV2_ShapeFn) {
ShapeInferenceTestOp op("RestoreV2");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"prefix", 0, DT_STRING})
.Input({"tensor_names", 0, DT_STRING})
.Input({"shapes_and_slices", 0, DT_STRING})
.Attr("dtypes", {DT_FLOAT, DT_INT64})
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?", "?;?");
INFER_OK(op, "[];[10];[10]", "?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[?,?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[?];[?,?]");
INFER_ERROR("in both shapes must be equal", op, "[];[10];[20]");
}
TEST(IoOpsTest, RestoreSlice_ShapeFn) {
ShapeInferenceTestOp op("RestoreSlice");
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[];[];[]", "?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[];[?]");
}
TEST(IoOpsTest, ShardedFilename_ShapeFn) {
ShapeInferenceTestOp op("ShardedFilename");
INFER_OK(op, "?;?;?", "[]");
INFER_OK(op, "[];[];[]", "[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[];[?]");
}
TEST(IoOpsTest, ShardedFilespec_ShapeFn) {
ShapeInferenceTestOp op("ShardedFilespec");
INFER_OK(op, "?;?", "[]");
INFER_OK(op, "[];[]", "[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?]");
}
TEST(IoOpsTest, SingleScalarInputAndOutput_ShapeFns) {
for (const char* op_name : {"ReadFile"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "[]");
INFER_OK(op, "[]", "[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?]");
}
}
TEST(IoOpsTest, TwoElementVectorInputsAndScalarOutput_ShapeFns) {
for (const char* op_name :
{"ReaderNumRecordsProduced", "ReaderNumWorkUnitsCompleted",
"ReaderSerializeState"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "[]");
INFER_OK(op, "[2]", "[]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
INFER_ERROR("Dimension must be 2 but is 3", op, "[3]");
}
}
TEST(IoOpsTest, ReaderRead_ShapeFn) {
ShapeInferenceTestOp op("ReaderRead");
INFER_OK(op, "?;?", "[];[]");
INFER_OK(op, "[2];[?]", "[];[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[?,?];[2]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2];[]");
}
TEST(IoOpsTest, ReaderReadUpTo_ShapeFn) {
ShapeInferenceTestOp op("ReaderReadUpTo");
INFER_OK(op, "[2];[2];[]", "[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[2];[]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[2];[2];[?]");
}
TEST(IoOpsTest, ReaderReset_ShapeFn) {
ShapeInferenceTestOp op("ReaderReset");
INFER_OK(op, "[2]", "");
INFER_OK(op, "[?]", "");
INFER_OK(op, "?", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
}
TEST(IoOpsTest, ReaderRestoreState_ShapeFn) {
ShapeInferenceTestOp op("ReaderRestoreState");
INFER_OK(op, "?;?", "");
INFER_OK(op, "[2];[]", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[?]");
}
TEST(IoOpsTest, MatchingFiles_ShapeFn) {
ShapeInferenceTestOp op("MatchingFiles");
INFER_OK(op, "?", "[?]");
INFER_OK(op, "[]", "[?]");
INFER_OK(op, "[42]", "[?]");
INFER_ERROR("Shape must be at most rank 1 but is rank 2", op, "[?,?]");
}
} |
1,258 | cpp | tensorflow/tensorflow | case_format | tensorflow/c/experimental/ops/gen/common/case_format.cc | tensorflow/c/experimental/ops/gen/common/case_format_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_COMMON_CASE_FORMAT_H_
#define TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_COMMON_CASE_FORMAT_H_
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
string toLowerCamel(const string &s, const char delimiter = '_');
string toLowerSnake(const string &s, const char delimiter = '_');
string toUpperCamel(const string &s, const char delimiter = '_');
string toUpperSnake(const string &s, const char delimiter = '_');
}
}
#endif
#include "tensorflow/c/experimental/ops/gen/common/case_format.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace {
enum CaseFormatType {
LOWER_CAMEL,
UPPER_CAMEL,
LOWER_SNAKE,
UPPER_SNAKE,
};
string FormatStringCase(const string &str, CaseFormatType to,
const char delimiter = '_') {
const bool from_snake =
(str == str_util::Uppercase(str)) || (str == str_util::Lowercase(str));
const bool toUpper = (to == UPPER_CAMEL || to == UPPER_SNAKE);
const bool toSnake = (to == LOWER_SNAKE || to == UPPER_SNAKE);
string result;
bool inputStart = true;
bool wordStart = true;
for (const char c : str) {
if (c == delimiter) {
if (wordStart) {
result.push_back(delimiter);
}
wordStart = true;
continue;
}
if (!from_snake && isupper(c)) {
wordStart = true;
}
if (wordStart && toSnake && !inputStart) {
result.push_back(delimiter);
}
const bool shouldCapIfSnake = toUpper;
const bool shouldCapIfCamel = wordStart && (toUpper || !inputStart);
if ((toSnake && shouldCapIfSnake) || (!toSnake && shouldCapIfCamel)) {
result += toupper(c);
} else {
result += tolower(c);
}
wordStart = false;
inputStart = false;
}
if (wordStart) {
result.push_back(delimiter);
}
return result;
}
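// How the conversion above works: word boundaries come from the delimiter
// when the input is snake case, and from uppercase letters otherwise; runs
// of leading and trailing delimiters are copied through unchanged, which is
// why inputs such as "__xxY__" keep their underscores (see the unit tests).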
}
string toLowerCamel(const string &s, const char delimiter) {
return FormatStringCase(s, LOWER_CAMEL, delimiter);
}
string toLowerSnake(const string &s, const char delimiter) {
return FormatStringCase(s, LOWER_SNAKE, delimiter);
}
string toUpperCamel(const string &s, const char delimiter) {
return FormatStringCase(s, UPPER_CAMEL, delimiter);
}
string toUpperSnake(const string &s, const char delimiter) {
return FormatStringCase(s, UPPER_SNAKE, delimiter);
}
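// Usage sketch; the expected values mirror the unit tests below:
//   toLowerSnake("threeNTest33Words")      -> "three_n_test33_words"
//   toUpperCamel("three_n_test33_words")   -> "ThreeNTest33Words"
//   toUpperSnake("threeNTest33Words", '-') -> "THREE-N-TEST33-WORDS"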
}
} | #include "tensorflow/c/experimental/ops/gen/common/case_format.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace {
struct Variations {
string lower_camel;
string lower_snake;
string upper_camel;
string upper_snake;
};
void TestSingleVariation(const string &str, Variations expected,
char delimiter = '_') {
EXPECT_EQ(expected.lower_camel, toLowerCamel(str, delimiter));
EXPECT_EQ(expected.lower_snake, toLowerSnake(str, delimiter));
EXPECT_EQ(expected.upper_camel, toUpperCamel(str, delimiter));
EXPECT_EQ(expected.upper_snake, toUpperSnake(str, delimiter));
}
void TestAllVariations(Variations variations, char delimiter = '_') {
TestSingleVariation(variations.lower_camel, variations, delimiter);
TestSingleVariation(variations.lower_snake, variations, delimiter);
TestSingleVariation(variations.upper_camel, variations, delimiter);
TestSingleVariation(variations.upper_snake, variations, delimiter);
}
TEST(CppOpGenCaseFormat, test_single_word) {
TestAllVariations(Variations{
"three",
"three",
"Three",
"THREE",
});
}
TEST(CppOpGenCaseFormat, test_complex_string) {
TestAllVariations(Variations{
"threeNTest33Words",
"three_n_test33_words",
"ThreeNTest33Words",
"THREE_N_TEST33_WORDS",
});
}
TEST(CppOpGenCaseFormat, test_hyphen_delimiter) {
TestAllVariations(
Variations{
"threeNTest33Words",
"three-n-test33-words",
"ThreeNTest33Words",
"THREE-N-TEST33-WORDS",
},
'-');
}
TEST(CppOpGenCaseFormat, test_trailing_underscore) {
TestAllVariations(Variations{
"threeNTest33Words_",
"three_n_test33_words_",
"ThreeNTest33Words_",
"THREE_N_TEST33_WORDS_",
});
}
TEST(CppOpGenCaseFormat, test_double_trailing_underscores) {
TestAllVariations(Variations{
"xxY__",
"xx_y__",
"XxY__",
"XX_Y__",
});
}
TEST(CppOpGenCaseFormat, test_leading_underscore) {
TestAllVariations(Variations{
"_threeNTest33Words",
"_three_n_test33_words",
"_ThreeNTest33Words",
"_THREE_N_TEST33_WORDS",
});
}
TEST(CppOpGenCaseFormat, test_double_leading_underscores) {
TestAllVariations(Variations{
"__threeNTest33Words",
"__three_n_test33_words",
"__ThreeNTest33Words",
"__THREE_N_TEST33_WORDS",
});
}
TEST(CppOpGenCaseFormat, test_leading_and_trailing_underscores) {
TestAllVariations(Variations{
"__threeNTest33Words____",
"__three_n_test33_words____",
"__ThreeNTest33Words____",
"__THREE_N_TEST33_WORDS____",
});
}
}
}
} |
1,259 | cpp | tensorflow/tensorflow | cpp_generator | tensorflow/c/experimental/ops/gen/cpp/cpp_generator.cc | tensorflow/c/experimental/ops/gen/cpp/cpp_generator_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_CPP_CPP_GENERATOR_H_
#define TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_CPP_CPP_GENERATOR_H_
#include "tensorflow/c/experimental/ops/gen/common/controller.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/cpp_config.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h"
namespace tensorflow {
namespace generator {
class CppGenerator {
public:
explicit CppGenerator(cpp::CppConfig cpp_config, PathConfig path_config);
SourceCode HeaderFileContents() const;
SourceCode SourceFileContents() const;
string HeaderFileName() const;
string SourceFileName() const;
void WriteHeaderFile() const;
void WriteSourceFile() const;
private:
SourceCode GenerateOneFile(cpp::RendererContext::Mode mode) const;
Controller controller_;
cpp::CppConfig cpp_config_;
PathConfig path_config_;
};
}
}
#endif
#include "tensorflow/c/experimental/ops/gen/cpp/cpp_generator.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/cpp_file_renderer.h"
#include "tensorflow/core/lib/io/path.h"
namespace tensorflow {
namespace generator {
CppGenerator::CppGenerator(cpp::CppConfig cpp_config, PathConfig path_config)
: controller_(path_config),
cpp_config_(cpp_config),
path_config_(path_config) {}
SourceCode CppGenerator::GenerateOneFile(
cpp::RendererContext::Mode mode) const {
SourceCode generated_code;
const std::vector<OpSpec> ops(controller_.GetModelOps());
std::vector<cpp::OpView> views(ops.begin(), ops.end());
cpp::RendererContext context{mode, generated_code, cpp_config_, path_config_};
cpp::CppFileRenderer(context, views).Render();
return generated_code;
}
SourceCode CppGenerator::HeaderFileContents() const {
return GenerateOneFile(cpp::RendererContext::kHeader);
}
SourceCode CppGenerator::SourceFileContents() const {
return GenerateOneFile(cpp::RendererContext::kSource);
}
string CppGenerator::HeaderFileName() const {
return io::JoinPath(path_config_.output_path, cpp_config_.unit + "_ops.h");
}
string CppGenerator::SourceFileName() const {
return io::JoinPath(path_config_.output_path, cpp_config_.unit + "_ops.cc");
}
void CppGenerator::WriteHeaderFile() const {
controller_.WriteFile(HeaderFileName(), HeaderFileContents());
}
void CppGenerator::WriteSourceFile() const {
controller_.WriteFile(SourceFileName(), SourceFileContents());
}
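// Usage sketch mirroring the unit test; the configuration values are
// illustrative, not required:
//   cpp::CppConfig cpp_config("testing", "tensorflow::ops");
//   PathConfig path_config(output_dir, source_dir, api_dirs, {"Neg", "MatMul"});
//   CppGenerator generator(cpp_config, path_config);
//   generator.WriteHeaderFile();  // writes <output_path>/testing_ops.h
//   generator.WriteSourceFile();  // writes <output_path>/testing_ops.cc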
}
} | #include "tensorflow/c/experimental/ops/gen/cpp/cpp_generator.h"
#include <algorithm>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace generator {
namespace {
TEST(CppGeneratorTest, typical_usage) {
string category = "testing";
string name_space = "tensorflow::ops";
string output_dir = "tensorflow/c/experimental/ops/gen/cpp/golden";
string source_dir = "tensorflow";
string api_dirs = "";
std::vector<string> ops = {
"Neg",
"MatMul",
"IdentityN",
"SparseSoftmaxCrossEntropyWithLogits",
"AccumulatorApplyGradient",
"VarHandleOp",
"RestoreV2",
};
cpp::CppConfig cpp_config(category, name_space);
PathConfig controller_config(output_dir, source_dir, api_dirs, ops);
CppGenerator generator(cpp_config, controller_config);
Env *env = Env::Default();
string golden_dir = io::JoinPath(testing::TensorFlowSrcRoot(),
controller_config.tf_output_dir);
string generated_header = generator.HeaderFileContents().Render();
string generated_source = generator.SourceFileContents().Render();
string expected_header;
string header_file_name = io::JoinPath(golden_dir, "testing_ops.h.golden");
TF_CHECK_OK(ReadFileToString(env, header_file_name, &expected_header));
string expected_source;
string source_file_name = io::JoinPath(golden_dir, "testing_ops.cc.golden");
TF_CHECK_OK(ReadFileToString(env, source_file_name, &expected_source));
expected_header.erase(
std::remove(expected_header.begin(), expected_header.end(), '\r'),
expected_header.end());
expected_source.erase(
std::remove(expected_source.begin(), expected_source.end(), '\r'),
expected_source.end());
EXPECT_EQ(expected_header, generated_header);
EXPECT_EQ(expected_source, generated_source);
}
}
}
} |
1,260 | cpp | tensorflow/tensorflow | renderer | tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.cc | tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_CPP_RENDERERS_RENDERER_H_
#define TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_CPP_RENDERERS_RENDERER_H_
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace cpp {
class Renderer {
public:
explicit Renderer(RendererContext context);
protected:
Renderer &BlankLine();
Renderer &CodeLine(const string &text);
template <typename... Args>
Renderer CodeLine(absl::string_view text, const Args &...args) {
return CodeLine(absl::Substitute(text, args...));
}
Renderer &CodeLines(const string &text);
template <typename... Args>
Renderer CodeLines(absl::string_view text, const Args &...args) {
return CodeLines(absl::Substitute(text, args...));
}
Renderer &Statement(const string &text);
template <typename... Args>
Renderer Statement(absl::string_view text, const Args &...args) {
return Statement(absl::Substitute(text, args...));
}
Renderer &TFStatement(const string &text);
template <typename... Args>
Renderer TFStatement(absl::string_view text, const Args &...args) {
return TFStatement(absl::Substitute(text, args...));
}
Renderer &CommentLine(const string &text = "");
template <typename... Args>
Renderer CommentLine(absl::string_view text, const Args &...args) {
return CommentLine(absl::Substitute(text, args...));
}
Renderer &BlockOpen(const string &text);
template <typename... Args>
Renderer BlockOpen(absl::string_view text, const Args &...args) {
return BlockOpen(absl::Substitute(text, args...));
}
Renderer &BlockClose(const string &text = "");
template <typename... Args>
Renderer BlockClose(absl::string_view text, const Args &...args) {
return BlockClose(absl::Substitute(text, args...));
}
protected:
RendererContext context_;
};
}
}
}
#endif
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace generator {
namespace cpp {
Renderer::Renderer(RendererContext context) : context_(context) {}
Renderer& Renderer::BlankLine() {
context_.code.AddLineWithoutIndent("");
return *this;
}
Renderer& Renderer::CodeLine(const string& text) {
context_.code.AddLineWithoutIndent(text);
return *this;
}
Renderer& Renderer::CodeLines(const string& text) {
StringPiece trimmed_text(text);
str_util::RemoveWhitespaceContext(&trimmed_text);
for (const string& line : str_util::Split(trimmed_text, '\n')) {
context_.code.AddLineWithoutIndent(line);
}
return *this;
}
Renderer& Renderer::Statement(const string& text) {
if (str_util::EndsWith(text, ";")) {
LOG(WARNING) << "Superfluous terminating ';' in '" << text << "'";
context_.code.AddLineWithIndent(text);
} else {
context_.code.AddLineWithIndent(absl::StrCat(text, ";"));
}
return *this;
}
Renderer& Renderer::TFStatement(const string& text) {
return Statement(absl::Substitute("TF_RETURN_IF_ERROR($0)", text));
}
Renderer& Renderer::CommentLine(const string& text) {
context_.code.AddLineWithIndent(absl::StrCat("// ", text));
return *this;
}
Renderer& Renderer::BlockOpen(const string& text) {
context_.code.AddLineWithIndent(absl::StrCat(text, " {"));
context_.code.IncreaseIndent();
return *this;
}
Renderer& Renderer::BlockClose(const string& text) {
context_.code.DecreaseIndent();
context_.code.AddLineWithIndent(absl::StrCat("}", text));
return *this;
}
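// Because every helper returns *this, subclasses can chain calls fluently,
// e.g. BlockOpen("void F()"), Statement("int i = 0"), BlockClose() emits a
// braced block with "int i = 0;" indented one level at the configured
// spaces-per-indent (a sketch of intended use, not an additional API).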
}
}
} | #include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h"
#include "tensorflow/c/experimental/ops/gen/common/path_config.h"
#include "tensorflow/c/experimental/ops/gen/common/source_code.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/cpp_config.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace cpp {
namespace {
TEST(Renderer, typical_usage) {
class TestRenderer : Renderer {
public:
explicit TestRenderer(SourceCode& code)
: Renderer(
{RendererContext::kSource, code, CppConfig(), PathConfig()}) {}
void Render() {
CommentLine("File level comment.");
CodeLine("#include \"header.h\"");
BlankLine();
BlockOpen("void TestFunction()");
{
Statement("int i = 1");
BlankLine();
BlockOpen("while (i == 1)");
{
CommentLine("Do nothing, really....");
CodeLine("#if 0");
Statement("call()");
CodeLine("#endif");
BlockClose();
}
BlockClose("
}
}
};
SourceCode code;
TestRenderer(code).Render();
string expected = R"(
#include "header.h"
void TestFunction() {
int i = 1;
while (i == 1) {
#if 0
call();
#endif
}
}
)";
code.SetSpacesPerIndent(3);
EXPECT_EQ(expected, code.Render());
}
}
}
}
} |
1,261 | cpp | tensorflow/tensorflow | nn_grad | tensorflow/cc/gradients/nn_grad.cc | tensorflow/cc/gradients/nn_grad_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_GRADIENTS_NN_GRAD_H_
#define TENSORFLOW_C_EXPERIMENTAL_GRADIENTS_NN_GRAD_H_
#include "tensorflow/c/eager/gradients.h"
namespace tensorflow {
namespace gradients {
GradientFunction* ReluRegisterer(const ForwardOperation& op);
GradientFunction* SparseSoftmaxCrossEntropyWithLogitsRegisterer(
const ForwardOperation& op);
GradientFunction* BiasAddRegisterer(const ForwardOperation& op);
}
}
#endif
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
namespace tensorflow {
namespace ops {
namespace {
Status SoftmaxGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto y = op.output(0);
auto dyy = Mul(scope, grad_inputs[0], y);
auto sum = Sum(scope, dyy, -1, Sum::KeepDims(true));
auto sub = Sub(scope, grad_inputs[0], sum);
auto dx = Mul(scope, sub, y);
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Softmax", SoftmaxGrad);
bool IsZero(const Scope& scope, const Output& grad) {
string op_type_name = grad.op().node()->type_string();
if (op_type_name == "ZerosLike" || op_type_name == "Zeros") {
return true;
}
return false;
}
Output BroadcastMul(const Scope& scope, const Output& vec, const Output& mat) {
auto reshaped = ExpandDims(scope, vec, -1);
return Multiply(scope, reshaped, mat);
}
Status SoftmaxCrossEntropyWithLogitsGrad(const Scope& scope,
const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto logits = op.input(0);
auto softmax_grad = op.output(1);
auto grad_loss = grad_inputs[0];
auto grad_grad = grad_inputs[1];
auto grad = BroadcastMul(scope, grad_loss, softmax_grad);
if (!IsZero(scope, grad_grad)) {
std::vector<int> axis;
auto logits_softmax = Softmax(scope, logits);
auto grad_grad_expand = ExpandDims(scope, grad_grad, 1);
auto logits_softmax_expand = ExpandDims(scope, logits_softmax, 2);
auto matmul_result =
BatchMatMul(scope, grad_grad_expand, logits_softmax_expand);
axis.push_back(1);
auto squeeze_result = Squeeze(scope, matmul_result, Squeeze::Axis(axis));
auto subtraction_result = Subtract(scope, grad_grad, squeeze_result);
auto multiply_result = Multiply(scope, subtraction_result, logits_softmax);
grad = Add(scope, grad, multiply_result);
}
auto minus_log_softmax = Multiply(scope, LogSoftmax(scope, logits), -1.0f);
grad_outputs->push_back(grad);
grad_outputs->push_back(BroadcastMul(scope, grad_loss, minus_log_softmax));
return scope.status();
}
REGISTER_GRADIENT_OP("SoftmaxCrossEntropyWithLogits",
SoftmaxCrossEntropyWithLogitsGrad);
Status LogSoftmaxGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto softmax = Exp(scope, op.output(0));
auto sum = Sum(scope, grad_inputs[0], {1}, Sum::KeepDims(true));
auto mul = Mul(scope, sum, softmax);
auto dx = Sub(scope, grad_inputs[0], mul);
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("LogSoftmax", LogSoftmaxGrad);
Status ReluGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dx = internal::ReluGrad(scope, grad_inputs[0], op.input(0));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Relu", ReluGradHelper);
Status Relu6GradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dx = internal::Relu6Grad(scope, grad_inputs[0], op.input(0));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Relu6", Relu6GradHelper);
Status LeakyReluGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
float alpha;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "alpha", &alpha));
internal::LeakyReluGrad::Attrs attrs;
auto dx = internal::LeakyReluGrad(scope, grad_inputs[0], op.input(0),
attrs.Alpha(alpha));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("LeakyRelu", LeakyReluGradHelper);
Status LeakyReluGradGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
float alpha;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "alpha", &alpha));
internal::LeakyReluGrad::Attrs attrs;
auto dx = internal::LeakyReluGrad(scope, grad_inputs[0], op.input(1),
attrs.Alpha(alpha));
grad_outputs->push_back(dx);
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("LeakyReluGrad", LeakyReluGradGradHelper);
Status EluGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dx = internal::EluGrad(scope, grad_inputs[0], op.output(0));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Elu", EluGradHelper);
Status SeluGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dx = internal::SeluGrad(scope, grad_inputs[0], op.output(0));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Selu", SeluGradHelper);
Status L2LossGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(Mul(scope, op.input(0), grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("L2Loss", L2LossGrad);
Status BiasAddGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
string data_format;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.output(0).node()->attrs(), "data_format", &data_format));
auto dx_1 =
BiasAddGrad(scope, grad_inputs[0], BiasAddGrad::DataFormat(data_format));
grad_outputs->push_back(Identity(scope, grad_inputs[0]));
grad_outputs->push_back(dx_1);
return scope.status();
}
REGISTER_GRADIENT_OP("BiasAdd", BiasAddGradHelper);
Status Conv2DGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
string data_format;
string padding;
std::vector<int32> strides;
bool use_cudnn_on_gpu;
auto attrs = op.output(0).node()->attrs();
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "use_cudnn_on_gpu", &use_cudnn_on_gpu));
auto dx_1 = Conv2DBackpropInput(scope, Shape(scope, op.input(0)), op.input(1),
grad_inputs[0], strides, padding,
Conv2DBackpropInput::DataFormat(data_format)
.UseCudnnOnGpu(use_cudnn_on_gpu));
grad_outputs->push_back(dx_1);
auto dx_2 =
Conv2DBackpropFilter(scope, op.input(0), Shape(scope, op.input(1)),
grad_inputs[0], strides, padding,
Conv2DBackpropFilter::DataFormat(data_format)
.UseCudnnOnGpu(use_cudnn_on_gpu));
grad_outputs->push_back(dx_2);
return scope.status();
}
REGISTER_GRADIENT_OP("Conv2D", Conv2DGrad);
Status MaxPoolGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
string data_format;
string padding;
std::vector<int32> strides;
std::vector<int32> ksize;
auto attrs = op.output(0).node()->attrs();
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
auto dx = internal::MaxPoolGrad(
scope, op.input(0), op.output(0), grad_inputs[0], ksize, strides, padding,
internal::MaxPoolGrad::DataFormat(data_format));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("MaxPool", MaxPoolGradHelper);
Status MaxPoolGradV2Helper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
string data_format;
string padding;
auto attrs = op.output(0).node()->attrs();
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
auto dx = MaxPoolGradV2(scope, op.input(0), op.output(0), grad_inputs[0],
op.input(1), op.input(2), padding,
MaxPoolGradV2::DataFormat(data_format));
grad_outputs->push_back(dx);
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("MaxPoolV2", MaxPoolGradV2Helper);
Status MaxPool3DGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
std::vector<int32> ksize;
std::vector<int32> strides;
string padding;
string data_format;
auto attrs = op.output(0).node()->attrs();
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
MaxPool3DGrad::Attrs grad_attrs;
auto dx =
MaxPool3DGrad(scope, op.input(0), op.output(0), grad_inputs[0], ksize,
strides, padding, grad_attrs.DataFormat(data_format));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("MaxPool3D", MaxPool3DGradHelper);
Status AvgPoolGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
std::vector<int32> ksize;
std::vector<int32> strides;
string padding;
string data_format;
auto attrs = op.output(0).node()->attrs();
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
internal::AvgPoolGrad::Attrs grad_attrs;
auto dx = internal::AvgPoolGrad(scope, Shape(scope, op.input(0)),
grad_inputs[0], ksize, strides, padding,
grad_attrs.DataFormat(data_format));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("AvgPool", AvgPoolGradHelper);
Status AvgPool3DGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
std::vector<int32> ksize;
std::vector<int32> strides;
string padding;
string data_format;
auto attrs = op.output(0).node()->attrs();
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &ksize));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &strides));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
AvgPool3DGrad::Attrs grad_attrs;
auto dx =
AvgPool3DGrad(scope, Shape(scope, op.input(0)), grad_inputs[0], ksize,
strides, padding, grad_attrs.DataFormat(data_format));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("AvgPool3D", AvgPool3DGradHelper);
Status LRNGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dx = internal::LRNGrad(scope, grad_inputs[0], op.input(0), op.output(0));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("LRN", LRNGradHelper);
Status SoftplusGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dx = internal::SoftplusGrad(scope, grad_inputs[0], op.input(0));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Softplus", SoftplusGradHelper);
Status SoftsignGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dx = internal::SoftsignGrad(scope, grad_inputs[0], op.input(0));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Softsign", SoftsignGradHelper);
Status FractionalAvgPoolGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
bool overlapping;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.output(0).node()->attrs(), "overlapping", &overlapping));
auto dx = internal::FractionalAvgPoolGrad(
scope, Shape(scope, op.input(0), Shape::OutType(DT_INT64)),
grad_inputs[0], op.output(1), op.output(2),
internal::FractionalAvgPoolGrad::Overlapping(overlapping));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("FractionalAvgPool", FractionalAvgPoolGradHelper);
Status FractionalMaxPoolGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
bool overlapping;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.output(0).node()->attrs(), "overlapping", &overlapping));
auto dx = internal::FractionalMaxPoolGrad(
scope, op.input(0), op.output(0), grad_inputs[0], op.output(1),
op.output(2), internal::FractionalMaxPoolGrad::Overlapping(overlapping));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("FractionalMaxPool", FractionalMaxPoolGradHelper);
template <typename T>
T FusedBatchNormGradAttrs(float epsilon, StringPiece data_format,
bool is_training) {
T result;
result.epsilon_ = epsilon;
result.data_format_ = data_format;
result.is_training_ = is_training;
return result;
}
using BatchNormGradFn =
std::function<Status(const Scope&, Output x, Output grad_y, Output scale,
const std::vector<Output>& reserve_spaces,
float epsilon, StringPiece data_format,
bool is_training, std::vector<Output>* grad_outputs)>;
Status BaseFusedBatchNormGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
BatchNormGradFn grad_fn,
std::vector<Output>* grad_outputs) {
if (op.num_outputs() < 5) {
return errors::InvalidArgument(
"FusedBatchNorm requires at least 5 outputs");
}
if (grad_inputs.empty()) {
return errors::InvalidArgument("FusedBatchNorm grad requires 1 grad input");
}
if (op.num_inputs() < 3) {
return errors::InvalidArgument("FusedBatchNorm has too few inputs");
}
Output x = op.input(0);
Output grad_y = grad_inputs[0];
Output scale = op.input(1);
float epsilon;
std::string data_format;
bool is_training;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "epsilon", &epsilon));
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "data_format", &data_format));
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "is_training", &is_training));
std::vector<Output> reserve_spaces;
reserve_spaces.push_back(op.output(3));
reserve_spaces.push_back(op.output(4));
if (op.num_outputs() > 5) {
reserve_spaces.push_back(op.output(5));
}
if (is_training) {
return grad_fn(scope, x, grad_y, scale, reserve_spaces, epsilon,
data_format, is_training, grad_outputs);
} else {
if (op.num_inputs() < 5) {
return errors::InvalidArgument(
"FusedBatchNorm requires 5 inputs in eval mode");
}
reserve_spaces[0] = op.input(3);
reserve_spaces[1] = op.input(4);
if (data_format == "NCHW") {
x = Transpose(scope, x, {0, 2, 3, 1});
grad_y = Transpose(scope, grad_y, {0, 2, 3, 1});
} else if (data_format == "NCDHW") {
x = Transpose(scope, x, {0, 2, 3, 4, 1});
grad_y = Transpose(scope, grad_y, {0, 2, 3, 4, 1});
}
StringPiece target_data_format;
if (data_format == "NCHW" || data_format == "NHWC") {
target_data_format = "NHWC";
} else {
target_data_format = "NDHWC";
}
TF_RETURN_IF_ERROR(grad_fn(scope, x, grad_y, scale, reserve_spaces, epsilon,
target_data_format, is_training, grad_outputs));
if (data_format == "NCHW") {
(*grad_outputs)[0] = Transpose(scope, (*grad_outputs)[0], {0, 3, 1, 2});
} else if (data_format == "NCDHW") {
(*grad_outputs)[0] =
Transpose(scope, (*grad_outputs)[0], {0, 4, 1, 2, 3});
}
return scope.status();
}
}
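// Note on the eval-mode branch above: when is_training is false the batch
// norm grad kernels are only expected to handle channels-last layouts, so
// NCHW/NCDHW inputs are transposed to NHWC/NDHWC, the gradient is computed
// there, and the x-gradient is transposed back to the caller's layout.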
Status FusedBatchNormV3Grad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
return BaseFusedBatchNormGrad(
scope, op, grad_inputs,
[](const Scope& scope, Output x, Output grad_y, Output scale,
const std::vector<Output>& reserve_spaces, float epsilon,
StringPiece data_format, bool is_training,
std::vector<Output>* grad_outputs) {
FusedBatchNormGradV3 grad(
scope, grad_y, x, scale, reserve_spaces[0], reserve_spaces[1],
reserve_spaces[2],
FusedBatchNormGradAttrs<FusedBatchNormGradV3::Attrs>(
epsilon, data_format, is_training));
grad_outputs->push_back(grad.x_backprop);
grad_outputs->push_back(grad.scale_backprop);
grad_outputs->push_back(grad.offset_backprop);
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
},
grad_outputs);
}
REGISTER_GRADIENT_OP("FusedBatchNormV3", FusedBatchNormV3Grad);
Status Conv2DBackpropInputGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
if (op.num_inputs() != 3) {
return errors::InvalidArgument("Conv2DBackpropInput requires 3 inputs.");
}
if (grad_inputs.empty()) {
return errors::InvalidArgument(
"Conv2DBackpropInput grad requires 1 grad input");
}
std::vector<int> dilations, strides, explicit_paddings;
bool use_cudnn_on_gpu;
std::string data_format, padding;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "dilations", &dilations));
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "strides", &strides));
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "explicit_paddings", &explicit_paddings));
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "use_cudnn_on_gpu", &use_cudnn_on_gpu));
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "padding", &padding));
grad_outputs->push_back(NoGradient());
Conv2DBackpropFilter::Attrs filter_attrs;
filter_attrs.use_cudnn_on_gpu_ = use_cudnn_on_gpu;
filter_attrs.explicit_paddings_ = explicit_paddings;
filter_attrs.data_format_ = data_format;
filter_attrs.dilations_ = dilations;
grad_outputs->push_back(
Conv2DBackpropFilter(scope, grad_inputs[0], Shape(scope, op.input(1)),
op.input(2), strides, padding, filter_attrs));
Conv2D::Attrs conv_attrs;
conv_attrs.use_cudnn_on_gpu_ = use_cudnn_on_gpu;
conv_attrs.explicit_paddings_ = explicit_paddings;
conv_attrs.data_format_ = data_format;
conv_attrs.dilations_ = dilations;
grad_outputs->push_back(
Conv2D(scope, grad_inputs[0], op.input(1), strides, padding, conv_attrs));
return scope.status();
}
REGISTER_GRADIENT_OP("Conv2DBackpropInput", Conv2DBackpropInputGrad);
Status DepthwiseConv2dNativeGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
if (op.num_inputs() != 2) {
return errors::InvalidArgument("DepthwiseConv2dNative requires 2 inputs.");
}
if (grad_inputs.empty()) {
return errors::InvalidArgument(
"DepthwiseConv2dNative grad requires 1 grad input");
}
std::vector<int> dilations, strides, explicit_paddings;
std::string data_format, padding;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "dilations", &dilations));
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "strides", &strides));
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "explicit_paddings", &explicit_paddings));
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "padding", &padding));
DepthwiseConv2dNativeBackpropInput::Attrs input_attrs;
input_attrs.explicit_paddings_ = explicit_paddings;
input_attrs.data_format_ = data_format;
input_attrs.dilations_ = dilations;
grad_outputs->push_back(DepthwiseConv2dNativeBackpropInput(
scope, Shape(scope, op.input(0)), op.input(1), grad_inputs[0], strides,
padding, input_attrs));
DepthwiseConv2dNativeBackpropFilter::Attrs filter_attrs;
filter_attrs.explicit_paddings_ = explicit_paddings;
filter_attrs.data_format_ = data_format;
filter_attrs.dilations_ = dilations;
grad_outputs->push_back(DepthwiseConv2dNativeBackpropFilter(
scope, op.input(0), Shape(scope, op.input(1)), grad_inputs[0], strides,
padding, filter_attrs));
return scope.status();
}
REGISTER_GRADIENT_OP("DepthwiseConv2dNative", DepthwiseConv2dNativeGrad);
}
}
} | #include <string>
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
namespace {
using ops::AvgPool;
using ops::AvgPool3D;
using ops::BiasAdd;
using ops::Conv2D;
using ops::Conv2DBackpropInput;
using ops::DepthwiseConv2dNative;
using ops::Elu;
using ops::FractionalAvgPool;
using ops::FractionalMaxPool;
using ops::FusedBatchNormV3;
using ops::L2Loss;
using ops::LogSoftmax;
using ops::LRN;
using ops::MaxPool;
using ops::MaxPool3D;
using ops::MaxPoolV2;
using ops::Placeholder;
using ops::Relu;
using ops::Relu6;
using ops::Selu;
using ops::Softmax;
using ops::Softplus;
using ops::Softsign;
class NNGradTest : public ::testing::Test {
protected:
NNGradTest() : scope_(Scope::NewRootScope()) {}
void RunTest(const Output& x, const TensorShape& x_shape, const Output& y,
const TensorShape& y_shape) {
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, {x}, {x_shape}, {y}, {y_shape}, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
void RunTest(const Output& x, const Tensor& x_init_value, const Output& y,
const TensorShape& y_shape) {
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, x, x_init_value, y, y_shape, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
void RunTest(const OutputList& xs, const std::vector<TensorShape>& x_shapes,
const OutputList& ys, const std::vector<TensorShape>& y_shapes) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, xs, x_shapes, ys, y_shapes, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
template <typename T>
void SetRandomValuesForMaxPooling(Tensor* tensor) {
auto tensor_flat = tensor->flat<T>();
T cur = 0;
for (size_t i = 0; i < tensor->NumElements(); i++) {
tensor_flat(i) = cur;
cur += 5e-2;
}
for (size_t i = tensor->NumElements() - 1; i >= 1; i--) {
size_t j = random::New64() % (i + 1);
T tmp = tensor_flat(i);
tensor_flat(i) = tensor_flat(j);
tensor_flat(j) = tmp;
}
}
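// The helper above writes strictly increasing values and then applies a
// Fisher-Yates shuffle, so all elements are distinct; max pooling therefore
// has a unique argmax and the numeric gradient check stays well-defined.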
Scope scope_;
};
TEST_F(NNGradTest, SoftmaxGrad) {
TensorShape shape({32, 10});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Softmax(scope_, x);
RunTest(x, shape, y, shape);
}
TEST_F(NNGradTest, SoftmaxRank3Grad) {
TensorShape shape({32, 1, 10});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Softmax(scope_, x);
RunTest(x, shape, y, shape);
}
TEST_F(NNGradTest, SoftmaxCrossEntropyWithLogitsGrad) {
TensorShape logits_shape({5, 3});
TensorShape loss_shape({5});
auto logits = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(logits_shape));
auto labels = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(logits_shape));
auto y =
tensorflow::ops::SoftmaxCrossEntropyWithLogits(scope_, logits, labels);
RunTest({logits, labels}, {logits_shape, logits_shape}, {y.backprop, y.loss},
{logits_shape, loss_shape});
}
TEST_F(NNGradTest, LogSoftmaxGrad) {
TensorShape shape({5, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = LogSoftmax(scope_, x);
Tensor x_init_value =
test::AsTensor<float>({-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f,
0.5f, 0.7f, 0.8f, -0.1f, 0.1f, 0.1f, 0.1f, 1.2f},
{5, 3});
RunTest(x, x_init_value, y, shape);
}
TEST_F(NNGradTest, ReluGrad) {
TensorShape shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Relu(scope_, x);
Tensor x_init_value = test::AsTensor<float>(
{-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
{5, 2});
RunTest(x, x_init_value, y, shape);
}
TEST_F(NNGradTest, Relu6Grad) {
TensorShape shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Relu6(scope_, x);
Tensor x_init_value = test::AsTensor<float>(
{-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 6.1f, 6.3f, 6.5f, 6.7f, 6.9f},
{5, 2});
RunTest(x, x_init_value, y, shape);
}
TEST_F(NNGradTest, LeakyReluGrad) {
TensorShape shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = ops::internal::LeakyRelu(scope_, x);
Tensor x_init_value = test::AsTensor<float>(
{-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
{5, 2});
RunTest(x, x_init_value, y, shape);
}
TEST_F(NNGradTest, LeakyReluGradGrad) {
TensorShape shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
Tensor x_init_value = test::AsTensor<float>(
{2.3f, 1.9f, 1.5f, 1.1f, 0.7f, 0.3f, -0.1f, -0.5f, -0.9f, -1.3f}, {5, 2});
Tensor features = test::AsTensor<float>(
{-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
{5, 2});
auto y = ops::internal::LeakyReluGrad(scope_, x, features);
RunTest(x, x_init_value, y, shape);
}
TEST_F(NNGradTest, EluGrad) {
TensorShape shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Elu(scope_, x);
Tensor x_init_value = test::AsTensor<float>(
{-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
{5, 2});
RunTest(x, x_init_value, y, shape);
}
TEST_F(NNGradTest, SeluGrad) {
TensorShape shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Selu(scope_, x);
Tensor x_init_value = test::AsTensor<float>(
{-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
{5, 2});
RunTest(x, x_init_value, y, shape);
}
TEST_F(NNGradTest, L2LossGrad) {
TensorShape x_shape({5, 2});
TensorShape y_shape({1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = L2Loss(scope_, x);
RunTest(x, x_shape, y, y_shape);
}
TEST_F(NNGradTest, BiasAddGradHelper) {
TensorShape shape({4, 5});
TensorShape bias_shape({5});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto bias = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(bias_shape));
auto y = BiasAdd(scope_, x, bias);
RunTest({x, bias}, {shape, bias_shape}, {y}, {shape});
}
TEST_F(NNGradTest, Conv2DGrad) {
TensorShape shape({1, 2, 2, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
Tensor filter = test::AsTensor<float>({0.5f}, {1, 1, 1, 1});
const std::vector<int> strides{1, 1, 1, 1};
auto y = Conv2D(scope_, x, filter, strides, "SAME");
RunTest(x, shape, y, shape);
}
TEST_F(NNGradTest, MaxPoolGradHelper) {
TensorShape x_shape({1, 2, 2, 1});
TensorShape y_shape({1, 1, 1, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
const std::vector<int> ksize{1, 2, 2, 1};
const std::vector<int> strides{1, 2, 2, 1};
auto y = MaxPool(scope_, x, ksize, strides, "VALID");
Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
SetRandomValuesForMaxPooling<float>(&x_init_value);
RunTest(x, x_init_value, y, y_shape);
}
TEST_F(NNGradTest, MaxPoolGradV2Helper) {
TensorShape x_shape({1, 2, 2, 1});
TensorShape y_shape({1, 1, 1, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Tensor ksize = test::AsTensor<int>({1, 2, 2, 1}, {4});
Tensor strides = test::AsTensor<int>({1, 2, 2, 1}, {4});
auto y = MaxPoolV2(scope_, x, ksize, strides, "VALID");
Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
SetRandomValuesForMaxPooling<float>(&x_init_value);
RunTest(x, x_init_value, y, y_shape);
}
TEST_F(NNGradTest, MaxPool3DGradHelper) {
TensorShape x_shape({1, 3, 3, 3, 1});
TensorShape y_shape({1, 1, 1, 1, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
const std::vector<int> ksize{1, 3, 3, 3, 1};
const std::vector<int> strides{1, 3, 3, 3, 1};
auto y = MaxPool3D(scope_, x, ksize, strides, "VALID");
Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
SetRandomValuesForMaxPooling<float>(&x_init_value);
RunTest(x, x_init_value, y, y_shape);
}
TEST_F(NNGradTest, AvgPoolGradHelper) {
TensorShape x_shape({1, 2, 2, 1});
TensorShape y_shape({1, 1, 1, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
const std::vector<int> ksize{1, 2, 2, 1};
const std::vector<int> strides{1, 2, 2, 1};
auto y = AvgPool(scope_, x, ksize, strides, "SAME");
RunTest(x, x_shape, y, y_shape);
}
TEST_F(NNGradTest, AvgPool3DGradHelper) {
TensorShape x_shape({1, 3, 3, 3, 1});
TensorShape y_shape({1, 1, 1, 1, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
const std::vector<int> ksize{1, 3, 3, 3, 1};
const std::vector<int> strides{1, 3, 3, 3, 1};
auto y = AvgPool3D(scope_, x, ksize, strides, "SAME");
RunTest(x, x_shape, y, y_shape);
}
TEST_F(NNGradTest, LRN) {
TensorShape x_shape({1, 1, 2, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = LRN(scope_, x);
RunTest(x, x_shape, y, x_shape);
}
TEST_F(NNGradTest, SoftplusGrad) {
TensorShape shape({3, 7});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Softplus(scope_, x);
RunTest(x, shape, y, shape);
}
TEST_F(NNGradTest, SoftsignGrad) {
TensorShape shape({3, 7});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Softsign(scope_, x);
RunTest(x, shape, y, shape);
}
TEST_F(NNGradTest, FractionalAvgPoolGradHelper) {
TensorShape x_shape({1, 3, 7, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = FractionalAvgPool(
scope_, x, {1, 1.2, 1.9, 1},
FractionalAvgPool::Deterministic(true).Overlapping(true).Seed(1).Seed2(
2));
TensorShape y_shape({1, 2, 3, 1});
RunTest(x, x_shape, y.output, y_shape);
}
TEST_F(NNGradTest, FractionalMaxPoolGradHelper) {
TensorShape x_shape({1, 3, 7, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = FractionalMaxPool(
scope_, x, {1, 1.2, 1.9, 1},
FractionalMaxPool::Deterministic(true).Overlapping(true).Seed(1).Seed2(
2));
Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
SetRandomValuesForMaxPooling<float>(&x_init_value);
TensorShape y_shape({1, 2, 3, 1});
RunTest(x, x_init_value, y.output, y_shape);
}
class FusedBatchNormGradTest : public NNGradTest,
public ::testing::WithParamInterface<
std::tuple<bool, bool, TensorShape>> {};
TEST_P(FusedBatchNormGradTest, FusedBatchNormV3Grad) {
FusedBatchNormV3::Attrs attrs;
attrs.is_training_ = std::get<0>(GetParam());
bool channel_first = std::get<1>(GetParam());
TensorShape shape = std::get<2>(GetParam());
int channel_dim = (channel_first) ? 1 : shape.dims() - 1;
TensorShape scale_shape({shape.dim_size(channel_dim)});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto scale = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(scale_shape));
auto offset = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(scale_shape));
auto mean = ops::ZerosLike(scope_, scale);
auto var = ops::OnesLike(scope_, scale);
if (!channel_first) {
attrs.data_format_ = (shape.dims() == 5) ? "NDHWC" : "NHWC";
} else {
attrs.data_format_ = (shape.dims() == 5) ? "NCDHW" : "NCHW";
}
auto y = FusedBatchNormV3(scope_, x, scale, offset, mean, var, attrs);
RunTest({x, scale, offset}, {shape, scale_shape, scale_shape}, {y.y},
{shape});
}
INSTANTIATE_TEST_SUITE_P(
FusedBatchNormGrad, FusedBatchNormGradTest,
::testing::Combine(::testing::Bool(), ::testing::Bool(),
::testing::Values(TensorShape({2, 3, 4, 5}),
TensorShape({2, 3, 2, 2, 2}))));
TEST_F(NNGradTest, Conv2DBackpropInputGrad) {
TensorShape shape({1, 2, 2, 1});
TensorShape filter_shape({1, 1, 1, 1});
auto out = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto filter = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(filter_shape));
const std::vector<int> strides{1, 1, 1, 1};
auto y = Conv2DBackpropInput(scope_, ops::Shape(scope_, out), filter, out,
strides, "SAME");
RunTest({out, filter}, {shape, filter_shape}, {y}, {shape});
}
TEST_F(NNGradTest, DepthwiseConv2dNativeGrad) {
TensorShape shape({1, 2, 2, 1});
TensorShape filter_shape({1, 1, 1, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto filter = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(filter_shape));
const std::vector<int> strides{1, 1, 1, 1};
auto y = DepthwiseConv2dNative(scope_, x, filter, strides, "SAME");
RunTest({x, filter}, {shape, filter_shape}, {y}, {shape});
}
}
} |
1,262 | cpp | tensorflow/tensorflow | math_grad | tensorflow/cc/gradients/math_grad.cc | tensorflow/cc/gradients/math_grad_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_GRADIENTS_MATH_GRAD_H_
#define TENSORFLOW_C_EXPERIMENTAL_GRADIENTS_MATH_GRAD_H_
#include "tensorflow/c/eager/gradients.h"
namespace tensorflow {
namespace gradients {
GradientFunction* AddRegisterer(const ForwardOperation& op);
GradientFunction* ExpRegisterer(const ForwardOperation& op);
GradientFunction* MatMulRegisterer(const ForwardOperation& op);
GradientFunction* SqrtRegisterer(const ForwardOperation& op);
GradientFunction* NegRegisterer(const ForwardOperation& op);
GradientFunction* SubRegisterer(const ForwardOperation& op);
GradientFunction* MulRegisterer(const ForwardOperation& op);
GradientFunction* Log1pRegisterer(const ForwardOperation& op);
GradientFunction* DivNoNanRegisterer(const ForwardOperation& op);
}
}
#endif
#include <cmath>
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/gradients/grad_helper.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/array_ops_internal.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/math_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
namespace tensorflow {
namespace ops {
namespace {
REGISTER_NO_GRADIENT_OP("Less");
REGISTER_NO_GRADIENT_OP("LessEqual");
REGISTER_NO_GRADIENT_OP("Greater");
REGISTER_NO_GRADIENT_OP("GreaterEqual");
REGISTER_NO_GRADIENT_OP("Equal");
REGISTER_NO_GRADIENT_OP("ApproximateEqual");
REGISTER_NO_GRADIENT_OP("NotEqual");
REGISTER_NO_GRADIENT_OP("LogicalAnd");
REGISTER_NO_GRADIENT_OP("LogicalOr");
REGISTER_NO_GRADIENT_OP("LogicalNot");
REGISTER_NO_GRADIENT_OP("Floor");
Output ConjugateHelper(const Scope& scope, const Output& out) {
DataType dtype = out.type();
if (dtype == DT_COMPLEX64 || dtype == DT_COMPLEX128) {
return Conj(scope, out);
} else {
return out;
}
}
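// The gradient functions below multiply the incoming gradient by
// conj(dy/dx) for complex dtypes (TensorFlow's convention for complex
// differentiation) and by dy/dx unchanged for real dtypes; ConjugateHelper
// centralizes that choice.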
Status AbsGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(Mul(scope, grad_inputs[0], Sign(scope, op.input(0))));
return scope.status();
}
REGISTER_GRADIENT_OP("Abs", AbsGrad);
Status NegGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(Neg(scope, grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("Neg", NegGrad);
Status InvGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(
internal::ReciprocalGrad(scope, op.output(0), grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("Inv", InvGrad);
REGISTER_GRADIENT_OP("Reciprocal", InvGrad);
Status SquareGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto two = Cast(scope, Const(scope, 2), op.input(0).type());
auto dydx = Mul(scope, two, op.input(0));
grad_outputs->push_back(
Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx)));
return scope.status();
}
REGISTER_GRADIENT_OP("Square", SquareGrad);
Status SqrtGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(
internal::SqrtGrad(scope, op.output(0), grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("Sqrt", SqrtGrad);
Status RsqrtGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(
internal::RsqrtGrad(scope, op.output(0), grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("Rsqrt", RsqrtGrad);
Status ExpGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(
Mul(scope, grad_inputs[0], ConjugateHelper(scope, op.output(0))));
return scope.status();
}
REGISTER_GRADIENT_OP("Exp", ExpGrad);
Status Expm1Grad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dydx = Exp(scope, op.input(0));
grad_outputs->push_back(
Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx)));
return scope.status();
}
REGISTER_GRADIENT_OP("Expm1", Expm1Grad);
Status LogGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dydx = Reciprocal(scope, op.input(0));
grad_outputs->push_back(
Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx)));
return scope.status();
}
REGISTER_GRADIENT_OP("Log", LogGrad);
Status Log1pGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto one = Cast(scope, Const(scope, 1.0), op.input(0).type());
auto dydx = Reciprocal(scope, Add(scope, one, op.input(0)));
grad_outputs->push_back(
Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx)));
return scope.status();
}
REGISTER_GRADIENT_OP("Log1p", Log1pGrad);
Status SinhGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dydx = Cosh(scope, op.input(0));
grad_outputs->push_back(
Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx)));
return scope.status();
}
REGISTER_GRADIENT_OP("Sinh", SinhGrad);
Status CoshGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dydx = Sinh(scope, op.input(0));
grad_outputs->push_back(
Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx)));
return scope.status();
}
REGISTER_GRADIENT_OP("Cosh", CoshGrad);
Status TanhGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto grad = grad_inputs[0];
Scope grad_scope = scope.WithControlDependencies(grad);
auto y = ConjugateHelper(grad_scope, op.output(0));
grad_outputs->push_back(internal::TanhGrad(grad_scope, y, grad));
return grad_scope.status();
}
REGISTER_GRADIENT_OP("Tanh", TanhGrad);
Status AsinhGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dydx = Reciprocal(scope, Cosh(scope, op.output(0)));
grad_outputs->push_back(
Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx)));
return scope.status();
}
REGISTER_GRADIENT_OP("Asinh", AsinhGrad);
Status AcoshGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dydx = Reciprocal(scope, Sinh(scope, op.output(0)));
grad_outputs->push_back(
Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx)));
return scope.status();
}
REGISTER_GRADIENT_OP("Acosh", AcoshGrad);
Status AtanhGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto one = Cast(scope, Const(scope, 1.0), op.input(0).type());
auto dydx = Reciprocal(scope, Sub(scope, one, Square(scope, op.input(0))));
grad_outputs->push_back(
Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx)));
return scope.status();
}
REGISTER_GRADIENT_OP("Atanh", AtanhGrad);
Status SigmoidGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto grad = grad_inputs[0];
Scope grad_scope = scope.WithControlDependencies(grad);
auto y = ConjugateHelper(grad_scope, op.output(0));
grad_outputs->push_back(internal::SigmoidGrad(grad_scope, y, grad));
return grad_scope.status();
}
REGISTER_GRADIENT_OP("Sigmoid", SigmoidGrad);
Status SignGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto shape = Shape(scope, op.input(0));
auto zero = Cast(scope, Const(scope, 0.0), op.input(0).type());
auto dx = Fill(scope, shape, zero);
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Sign", SignGrad);
Status SinGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dydx = Cos(scope, op.input(0));
grad_outputs->push_back(
Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx)));
return scope.status();
}
REGISTER_GRADIENT_OP("Sin", SinGrad);
Status CosGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dydx = Neg(scope, Sin(scope, op.input(0)));
grad_outputs->push_back(
Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx)));
return scope.status();
}
REGISTER_GRADIENT_OP("Cos", CosGrad);
Status AsinGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto x2 = Square(scope, op.input(0));
auto one = Cast(scope, Const(scope, 1.0), op.input(0).type());
auto dydx = Reciprocal(scope, Sqrt(scope, Sub(scope, one, x2)));
auto dx = Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Asin", AsinGrad);
Status AcosGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto x2 = Square(scope, op.input(0));
auto one = Cast(scope, Const(scope, 1.0), op.input(0).type());
auto dydx = Neg(scope, Reciprocal(scope, Sqrt(scope, Sub(scope, one, x2))));
auto dx = Mul(scope, grad_inputs[0], dydx);
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Acos", AcosGrad);
Status TanGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto dydx = Square(scope, Reciprocal(scope, Cos(scope, op.input(0))));
auto dx = Mul(scope, grad_inputs[0], ConjugateHelper(scope, dydx));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Tan", TanGrad);
Status AtanGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto one = Cast(scope, Const(scope, 1.0), op.input(0).type());
auto dydx = Reciprocal(scope, Add(scope, one, Square(scope, op.input(0))));
auto dx = Mul(scope, grad_inputs[0], dydx);
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Atan", AtanGrad);
Status Atan2Grad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto y = op.input(0);
auto x = op.input(1);
Output grad_inv = Div(scope, grad_inputs[0],
Add(scope, Square(scope, x), Square(scope, y)));
grad_outputs->push_back(Mul(scope, x, grad_inv));
grad_outputs->push_back(Mul(scope, Neg(scope, y), grad_inv));
return scope.status();
}
REGISTER_GRADIENT_OP("Atan2", Atan2Grad);
Status BinaryGradCommon(const Scope& scope, const Operation& op,
std::vector<Output>* grad_outputs, const Output& gx_1,
const Output& gx_2) {
auto sx_1 = Shape(scope, op.input(0));
auto sx_2 = Shape(scope, op.input(1));
auto rx = internal::BroadcastGradientArgs(scope, sx_1, sx_2);
auto dx_1 = Reshape(scope, Sum(scope, gx_1, rx.r0), sx_1);
auto dx_2 = Reshape(scope, Sum(scope, gx_2, rx.r1), sx_2);
grad_outputs->push_back(dx_1);
grad_outputs->push_back(dx_2);
return scope.status();
}
Status AddGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto gx_1 = Identity(scope, grad_inputs[0]);
auto gx_2 = Identity(scope, grad_inputs[0]);
return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
}
REGISTER_GRADIENT_OP("Add", AddGrad);
REGISTER_GRADIENT_OP("AddV2", AddGrad);
Status SubGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto gx_1 = Identity(scope, grad_inputs[0]);
auto gx_2 = Neg(scope, grad_inputs[0]);
return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
}
REGISTER_GRADIENT_OP("Sub", SubGrad);
Status MulGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto x_1 = ConjugateHelper(scope, op.input(0));
auto x_2 = ConjugateHelper(scope, op.input(1));
auto gx_1 = Mul(scope, grad_inputs[0], x_2);
auto gx_2 = Mul(scope, grad_inputs[0], x_1);
return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
}
REGISTER_GRADIENT_OP("Mul", MulGrad);
Status DivGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto x_1 = ConjugateHelper(scope, op.input(0));
auto x_2 = ConjugateHelper(scope, op.input(1));
auto gx_1 = Div(scope, grad_inputs[0], x_2);
auto gx_2 = Mul(scope, grad_inputs[0],
Div(scope, Div(scope, Neg(scope, x_1), x_2), x_2));
return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
}
REGISTER_GRADIENT_OP("Div", DivGrad);
Status RealDivGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto x_1 = ConjugateHelper(scope, op.input(0));
auto x_2 = ConjugateHelper(scope, op.input(1));
auto gx_1 = RealDiv(scope, grad_inputs[0], x_2);
auto gx_2 = Mul(scope, grad_inputs[0],
RealDiv(scope, RealDiv(scope, Neg(scope, x_1), x_2), x_2));
return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
}
REGISTER_GRADIENT_OP("RealDiv", RealDivGrad);
Status DivNoNanGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto x_1 = ConjugateHelper(scope, op.input(0));
auto x_2 = ConjugateHelper(scope, op.input(1));
auto gx_1 = DivNoNan(scope, grad_inputs[0], x_2);
auto gx_2 = Mul(scope, grad_inputs[0],
DivNoNan(scope, DivNoNan(scope, Neg(scope, x_1), x_2), x_2));
return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
}
REGISTER_GRADIENT_OP("DivNoNan", DivNoNanGrad);
Status SquaredDifferenceGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto x_1 = ConjugateHelper(scope, op.input(0));
auto x_2 = ConjugateHelper(scope, op.input(1));
auto two = Cast(scope, Const(scope, 2), grad_inputs[0].type());
auto gx_1 = Mul(scope, grad_inputs[0], Mul(scope, two, Sub(scope, x_1, x_2)));
auto gx_2 = Neg(scope, gx_1);
return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
}
REGISTER_GRADIENT_OP("SquaredDifference", SquaredDifferenceGrad);
Status AddNGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto incoming = Identity(scope, grad_inputs[0]);
for (int32_t i = 0; i < op.num_inputs(); ++i) {
grad_outputs->push_back(incoming);
}
return scope.status();
}
REGISTER_GRADIENT_OP("AddN", AddNGrad);
Status PowGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto x = ConjugateHelper(scope, op.input(0));
auto y = ConjugateHelper(scope, op.input(1));
auto z = ConjugateHelper(scope, op.output(0));
auto grad = grad_inputs[0];
auto one = Cast(scope, Const(scope, 1.0), y.type());
auto gx_1 =
Mul(scope, Mul(scope, grad, y), Pow(scope, x, Sub(scope, y, one)));
DataType x_dtype = x.type();
auto zero = Cast(scope, Const(scope, 0.0), x_dtype);
if (x_dtype == DT_COMPLEX64 || x_dtype == DT_COMPLEX128) {
auto log_x = Where3(scope, NotEqual(scope, x, zero), Log(scope, x),
ZerosLike(scope, x));
auto gy_1 = Mul(scope, Mul(scope, grad, z), log_x);
return BinaryGradCommon(scope, op, grad_outputs, gx_1, gy_1);
} else {
auto log_x = Where3(scope, Greater(scope, x, zero), Log(scope, x),
ZerosLike(scope, x));
auto gy_1 = Mul(scope, Mul(scope, grad, z), log_x);
return BinaryGradCommon(scope, op, grad_outputs, gx_1, gy_1);
}
}
REGISTER_GRADIENT_OP("Pow", PowGrad);
Status MaximumMinimumGradCommon(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs,
const Output& comparator) {
auto grad = grad_inputs[0];
auto zeros = ZerosLike(scope, grad);
auto gx_1 = Where3(scope, comparator, grad, zeros);
auto gx_2 = Where3(scope, comparator, zeros, grad);
return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
}
Status MaximumGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto comparator = GreaterEqual(scope, op.input(0), op.input(1));
return MaximumMinimumGradCommon(scope, op, grad_inputs, grad_outputs,
comparator);
}
REGISTER_GRADIENT_OP("Maximum", MaximumGrad);
Status MinimumGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto comparator = LessEqual(scope, op.input(0), op.input(1));
return MaximumMinimumGradCommon(scope, op, grad_inputs, grad_outputs,
comparator);
}
REGISTER_GRADIENT_OP("Minimum", MinimumGrad);
Status RealGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto zero = Cast(scope, Const(scope, 0.0), op.output(0).type());
auto dx = Complex(scope, grad_inputs[0], zero);
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Real", RealGrad);
Status ImagGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto zero = Cast(scope, Const(scope, 0.0), op.output(0).type());
auto dx = Complex(scope, zero, grad_inputs[0]);
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Imag", ImagGrad);
Status ComplexGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto gx_1 = Real(scope, grad_inputs[0]);
auto gx_2 = Imag(scope, grad_inputs[0]);
return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
}
REGISTER_GRADIENT_OP("Complex", ComplexGrad);
Status AngleGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto re = Real(scope, op.input(0));
auto im = Imag(scope, op.input(0));
auto z_inv = Reciprocal(scope, Complex(scope, im, re));
auto zero = Cast(scope, Const(scope, 0), grad_inputs[0].type());
auto grad = Complex(scope, grad_inputs[0], zero);
auto dx = Neg(scope, Mul(scope, grad, z_inv));
grad_outputs->push_back(dx);
return scope.status();
}
REGISTER_GRADIENT_OP("Angle", AngleGrad);
Status ConjGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(Conj(scope, grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("Conj", ConjGrad);
Output SafeDivHelper(const Scope& scope, const Output& x, const Output& y) {
return Div(scope, x, Maximum(scope, y, Const(scope, 1)));
}
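// The gradient of Sum broadcasts the incoming gradient back over the reduced
// dimensions: reshape it to the kept-dims shape, then Tile by the per-axis
// reduction factors.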
Output SumGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs) {
auto input_shape = Shape(scope, op.input(0));
auto output_shape_kept_dims =
ReducedShapeHelper(scope, input_shape, op.input(1));
auto tile_scaling = SafeDivHelper(scope, input_shape, output_shape_kept_dims);
auto grad = Reshape(scope, grad_inputs[0], output_shape_kept_dims);
return Tile(scope, grad, tile_scaling);
}
Status SumGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(SumGradHelper(scope, op, grad_inputs));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("Sum", SumGrad);
Status MeanGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto sum_grad = SumGradHelper(scope, op, grad_inputs);
auto input_shape = Shape(scope, op.input(0));
auto output_shape = Shape(scope, op.output(0));
auto zero = Const(scope, 0);
auto group_size = SafeDivHelper(scope, Prod(scope, input_shape, zero),
Prod(scope, output_shape, zero));
grad_outputs->push_back(
Div(scope, sum_grad, Cast(scope, group_size, sum_grad.type())));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("Mean", MeanGrad);
Status ErfGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto grad = grad_inputs[0];
auto two_over_root_pi =
Cast(scope, Const(scope, 2 / std::sqrt(M_PI)), grad.type());
Scope grad_scope = scope.WithControlDependencies(grad);
auto x = ConjugateHelper(grad_scope, op.input(0));
auto dx = Mul(grad_scope, Mul(grad_scope, grad, two_over_root_pi),
Exp(grad_scope, Neg(grad_scope, Square(grad_scope, x))));
grad_outputs->push_back(dx);
return grad_scope.status();
}
REGISTER_GRADIENT_OP("Erf", ErfGrad);
Status ErfinvGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto grad = grad_inputs[0];
auto root_pi_over_two =
Cast(scope, Const(scope, std::sqrt(M_PI) / 2), grad.type());
Scope grad_scope = scope.WithControlDependencies(grad);
auto x = ConjugateHelper(grad_scope, op.input(0));
auto dx = Mul(grad_scope, Mul(grad_scope, grad, root_pi_over_two),
Exp(grad_scope, Square(grad_scope, op.output(0))));
grad_outputs->push_back(dx);
return grad_scope.status();
}
REGISTER_GRADIENT_OP("Erfinv", ErfinvGrad);
Status NdtriGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto grad = grad_inputs[0];
auto root_two_pi =
Cast(scope, Const(scope, std::sqrt(2 * M_PI)), grad.type());
auto two = Cast(scope, Const(scope, 2), grad.type());
Scope grad_scope = scope.WithControlDependencies(grad);
auto x = ConjugateHelper(grad_scope, op.input(0));
auto dx = Mul(
grad_scope, Mul(grad_scope, grad, root_two_pi),
Exp(grad_scope, Div(grad_scope, Square(grad_scope, op.output(0)), two)));
grad_outputs->push_back(dx);
return grad_scope.status();
}
REGISTER_GRADIENT_OP("Ndtri", NdtriGrad);
Status LgammaGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto grad = grad_inputs[0];
Scope grad_scope = scope.WithControlDependencies(grad);
auto x = ConjugateHelper(grad_scope, op.input(0));
auto dx = Mul(grad_scope, grad, Digamma(grad_scope, x));
grad_outputs->push_back(dx);
return grad_scope.status();
}
REGISTER_GRADIENT_OP("Lgamma", LgammaGrad);
Status MinOrMaxGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto input = op.input(0);
auto reduction_indices = op.input(1);
auto input_shape = Shape(scope, input);
auto output_shape_kept_dims =
ReducedShapeHelper(scope, input_shape, reduction_indices);
auto y = Reshape(scope, op.output(0), output_shape_kept_dims);
auto grad = Reshape(scope, grad_inputs[0], output_shape_kept_dims);
auto indicators = Cast(scope, Equal(scope, y, input), grad.type());
auto num_selected = Reshape(scope, Sum(scope, indicators, reduction_indices),
output_shape_kept_dims);
// Split the gradient evenly among all positions that attained the min/max.
grad_outputs->push_back(
Mul(scope, Div(scope, indicators, num_selected), grad));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("Min", MinOrMaxGrad);
REGISTER_GRADIENT_OP("Max", MinOrMaxGrad); | #include <functional>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
namespace {
using ops::Abs;
using ops::Add;
using ops::AddN;
using ops::AddV2;
using ops::Atan2;
using ops::BatchMatMul;
using ops::BatchMatMulV3;
using ops::Cast;
using ops::ClipByValue;
using ops::Const;
using ops::Cumsum;
using ops::Div;
using ops::DivNoNan;
using ops::MatMul;
using ops::Max;
using ops::Maximum;
using ops::Mean;
using ops::Min;
using ops::Minimum;
using ops::Mul;
using ops::Placeholder;
using ops::Pow;
using ops::Prod;
using ops::RealDiv;
using ops::SegmentSum;
using ops::SelectV2;
using ops::SquaredDifference;
using ops::Sub;
using ops::Sum;
using ops::UnsortedSegmentMax;
using ops::UnsortedSegmentMin;
using ops::UnsortedSegmentSum;
using ops::Where3;
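// Harness for elementwise unary gradients: builds y = op(x) on a fixed
// {2, 3, 2} input filled by x_fn and compares the symbolic gradient against
// a numeric estimate via ComputeGradientError.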
class CWiseUnaryGradTest : public ::testing::Test {
protected:
CWiseUnaryGradTest() : scope_(Scope::NewRootScope().WithDevice("/cpu:0")) {}
enum UnaryOpType {
ABS,
NEG,
INV,
SQUARE,
SQRT,
RSQRT,
EXP,
EXPM1,
LOG,
LOG1P,
SINH,
COSH,
TANH,
ASINH,
ACOSH,
ATANH,
SIGMOID,
SIGN,
SIN,
COS,
ASIN,
ACOS,
TAN,
ATAN,
REAL,
IMAG,
CONJ,
COMPLEX,
ANGLE,
LGAMMA,
ERF,
ERFINV,
NDTRI
};
template <typename X_T, typename Y_T>
void TestCWiseGrad(UnaryOpType op_type, const std::function<X_T(int)>& x_fn) {
TF_ASSERT_OK(scope_.status());
DataType x_type = DataTypeToEnum<X_T>::v();
TensorShape shape({2, 3, 2});
auto x = Placeholder(scope_, x_type, Placeholder::Shape(shape));
Tensor x_data(x_type, shape);
auto x_data_flat = x_data.flat<X_T>();
for (int i = 0; i < x_data_flat.size(); ++i) {
x_data_flat(i) = x_fn(i);
}
Output y;
switch (op_type) {
using namespace ops;
case ABS:
y = Abs(scope_, x);
break;
case NEG:
y = Neg(scope_, x);
break;
case INV:
y = Reciprocal(scope_, x);
break;
case SQUARE:
y = Square(scope_, x);
break;
case SQRT:
y = Sqrt(scope_, x);
break;
case RSQRT:
y = Rsqrt(scope_, x);
break;
case EXP:
y = Exp(scope_, x);
break;
case EXPM1:
y = Expm1(scope_, x);
break;
case LOG:
y = Log(scope_, x);
break;
case LOG1P:
y = Log1p(scope_, x);
break;
case SINH:
y = Sinh(scope_, x);
break;
case COSH:
y = Cosh(scope_, x);
break;
case TANH:
y = Tanh(scope_, x);
break;
case ASINH:
y = Asinh(scope_, x);
break;
case ACOSH:
y = Acosh(scope_, x);
break;
case ATANH:
y = Atanh(scope_, x);
break;
case SIGMOID:
y = Sigmoid(scope_, x);
break;
case SIGN:
y = Sign(scope_, x);
break;
case SIN:
y = Sin(scope_, x);
break;
case COS:
y = Cos(scope_, x);
break;
case ASIN:
y = Asin(scope_, x);
break;
case ACOS:
y = Acos(scope_, x);
break;
case TAN:
y = Tan(scope_, x);
break;
case ATAN:
y = Atan(scope_, x);
break;
case REAL:
y = Real(scope_, x);
break;
case IMAG:
y = Imag(scope_, x);
break;
case CONJ:
y = Conj(scope_, x);
break;
case COMPLEX:
y = Complex(scope_, x, x);
break;
case ANGLE:
y = Angle(scope_, x);
break;
case LGAMMA:
y = Lgamma(scope_, x);
break;
case ERF:
y = Erf(scope_, x);
break;
case ERFINV:
y = Erfinv(scope_, x);
break;
case NDTRI:
y = Ndtri(scope_, x);
break;
}
float max_error;
TF_ASSERT_OK((ComputeGradientError<X_T, Y_T, float>(scope_, x, x_data, y,
shape, &max_error)));
EXPECT_LT(max_error, 1e-3f);
}
float RV(const std::vector<float>& v) {
return v[random::New64() % v.size()];
}
complex64 CRV(const std::vector<complex64>& v) {
return v[random::New64() % v.size()];
}
complex64 conjugate(const complex64& val) {
return complex64(val.real(), -val.imag());
}
Scope scope_;
};
TEST_F(CWiseUnaryGradTest, Abs) {
auto x_fn = [this](const int i) { return RV({-1, 0, 1}); };
TestCWiseGrad<float, float>(ABS, x_fn);
}
TEST_F(CWiseUnaryGradTest, Neg) {
auto x_fn = [this](const int i) { return RV({-1, 0, 1}); };
TestCWiseGrad<float, float>(NEG, x_fn);
}
TEST_F(CWiseUnaryGradTest, Reciprocal) {
auto x_fn = [this](const int i) { return RV({-1, 1, -2, 2, -3, 3, -4, 4}); };
TestCWiseGrad<float, float>(INV, x_fn);
}
TEST_F(CWiseUnaryGradTest, Reciprocal_Complex) {
auto x_fn = [this](const int i) { return CRV({{-1, 0}, {1, 0}, {2, -1}}); };
TestCWiseGrad<complex64, complex64>(INV, x_fn);
}
TEST_F(CWiseUnaryGradTest, Square) {
auto x_fn = [this](const int i) { return RV({0, -1, 1, -2, 2, -3, 3}); };
TestCWiseGrad<float, float>(SQUARE, x_fn);
}
TEST_F(CWiseUnaryGradTest, Square_Complex) {
auto x_fn = [this](const int i) { return CRV({{-1, 0}, {1, 0}, {2, -1}}); };
TestCWiseGrad<complex64, complex64>(SQUARE, x_fn);
}
TEST_F(CWiseUnaryGradTest, Sqrt) {
auto x_fn = [this](const int i) { return RV({0.5, 1, 2, 3, 4, 5, 6, 7}); };
TestCWiseGrad<float, float>(SQRT, x_fn);
}
TEST_F(CWiseUnaryGradTest, Sqrt_Complex) {
auto x_fn = [this](const int i) {
return CRV({{-1.0f, 0.5f}, {1.0f, 0.5f}, {2, -1}});
};
TestCWiseGrad<complex64, complex64>(SQRT, x_fn);
}
TEST_F(CWiseUnaryGradTest, Rsqrt) {
auto x_fn = [this](const int i) { return RV({1, 2, 3, 4, 5, 6, 7, 8}); };
TestCWiseGrad<float, float>(RSQRT, x_fn);
}
TEST_F(CWiseUnaryGradTest, Rsqrt_Complex) {
auto x_fn = [this](const int i) {
return CRV({{-1.0f, 0.5f}, {1.0f, 0.5f}, {2, -1}});
};
TestCWiseGrad<complex64, complex64>(RSQRT, x_fn);
}
TEST_F(CWiseUnaryGradTest, Exp) {
auto x_fn = [this](const int i) {
return RV({0, -1, 1, -1.5f, 1.5f, -2, 2});
};
TestCWiseGrad<float, float>(EXP, x_fn);
}
TEST_F(CWiseUnaryGradTest, Exp_Complex) {
auto x_fn = [this](const int i) { return CRV({{-1, 0}, {1, 0}, {2, -1}}); };
TestCWiseGrad<complex64, complex64>(EXP, x_fn);
}
TEST_F(CWiseUnaryGradTest, Expm1) {
auto x_fn = [this](const int i) { return RV({0, -1, 1e-6, 1, -1.5, 1.5}); };
TestCWiseGrad<float, float>(EXPM1, x_fn);
}
TEST_F(CWiseUnaryGradTest, Expm1_Complex) {
auto x_fn = [this](const int i) {
return CRV({{-1, 0}, {1, 0}, {1.5, -1.5}});
};
TestCWiseGrad<complex64, complex64>(EXPM1, x_fn);
}
TEST_F(CWiseUnaryGradTest, Log) {
auto x_fn = [this](const int i) { return RV({0.5, 1, 2, 3, 4}); };
TestCWiseGrad<float, float>(LOG, x_fn);
}
TEST_F(CWiseUnaryGradTest, Log_Complex) {
auto x_fn = [this](const int i) {
return CRV({{-1, 0.5f}, {1, 0.5f}, {2, -1}});
};
TestCWiseGrad<complex64, complex64>(LOG, x_fn);
}
TEST_F(CWiseUnaryGradTest, Log1p) {
auto x_fn = [this](const int i) { return RV({0, 1e-6, 1, 2, 3, 4, 100}); };
TestCWiseGrad<float, float>(LOG1P, x_fn);
}
TEST_F(CWiseUnaryGradTest, Log1p_Complex) {
auto x_fn = [this](const int i) {
return CRV({{0, 0}, {1e-6, 0}, {2, -1}, {1, 2}, {3, 4}});
};
TestCWiseGrad<complex64, complex64>(LOG1P, x_fn);
}
TEST_F(CWiseUnaryGradTest, Sinh) {
auto x_fn = [this](const int i) { return RV({0.5, -0.5, 1, -1, 1.5, -1.5}); };
TestCWiseGrad<float, float>(SINH, x_fn);
}
TEST_F(CWiseUnaryGradTest, Sinh_Complex) {
auto x_fn = [this](const int i) {
return CRV({{0.5, 0.25}, {0.25, 0.5}, {1.5, -1}, {1, 1.5}});
};
TestCWiseGrad<complex64, complex64>(SINH, x_fn);
}
TEST_F(CWiseUnaryGradTest, Cosh) {
auto x_fn = [this](const int i) { return RV({0, -1, 1, -2, 2, -3, 3}); };
TestCWiseGrad<float, float>(COSH, x_fn);
}
TEST_F(CWiseUnaryGradTest, Cosh_Complex) {
auto x_fn = [this](const int i) {
return CRV({{0.5, 0.25}, {0.25, 0.5}, {1.5, -1}, {1, 1.5}});
};
TestCWiseGrad<complex64, complex64>(COSH, x_fn);
}
TEST_F(CWiseUnaryGradTest, Tanh) {
auto x_fn = [this](const int i) { return RV({0, -1, 1, -2, 2, -3, 3}); };
TestCWiseGrad<float, float>(TANH, x_fn);
}
TEST_F(CWiseUnaryGradTest, Tanh_Complex) {
auto x_fn = [this](const int i) {
return CRV({{1, 0}, {0, 1}, {2, -1}, {1, 2}, {3, 4}});
};
TestCWiseGrad<complex64, complex64>(TANH, x_fn);
}
TEST_F(CWiseUnaryGradTest, Asinh) {
auto x_fn = [this](const int i) { return RV({0.5, 1, -1, -1.5, 1.5}); };
TestCWiseGrad<float, float>(ASINH, x_fn);
}
TEST_F(CWiseUnaryGradTest, Asinh_Complex) {
auto x_fn = [this](const int i) {
return CRV({{1, 0.5}, {0.5, 1}, {0.5, -1}, {1, 1.5}});
};
TestCWiseGrad<complex64, complex64>(ASINH, x_fn);
}
TEST_F(CWiseUnaryGradTest, Acosh) {
auto x_fn = [this](const int i) { return RV({1.5, 2, 2.5}); };
TestCWiseGrad<float, float>(ACOSH, x_fn);
}
TEST_F(CWiseUnaryGradTest, Acosh_Complex) {
auto x_fn = [this](const int i) {
return CRV({{1, 0.5}, {0.5, 1}, {0.5, -1}, {1, 1.5}});
};
TestCWiseGrad<complex64, complex64>(ACOSH, x_fn);
}
TEST_F(CWiseUnaryGradTest, Atanh) {
auto x_fn = [this](const int i) { return RV({0, -0.5, 0.5, -0.1, 0.1}); };
TestCWiseGrad<float, float>(ATANH, x_fn);
}
TEST_F(CWiseUnaryGradTest, Atanh_Complex) {
auto x_fn = [this](const int i) {
return CRV({{0.1, 0}, {0, 0.1}, {0.2, -0.1}, {0.1, 0.2}, {0.3, 0.4}});
};
TestCWiseGrad<complex64, complex64>(ATANH, x_fn);
}
TEST_F(CWiseUnaryGradTest, Sigmoid) {
auto x_fn = [this](const int i) { return RV({0, -1, 1, -2, 2, -3, 3}); };
TestCWiseGrad<float, float>(SIGMOID, x_fn);
}
TEST_F(CWiseUnaryGradTest, Sigmoid_Complex) {
auto x_fn = [this](const int i) {
return CRV({{1, 0}, {0, 0}, {2, -1}, {1, 2}, {3, 4}});
};
TestCWiseGrad<complex64, complex64>(SIGMOID, x_fn);
}
TEST_F(CWiseUnaryGradTest, Sign) {
auto x_fn = [this](const int i) { return RV({-1, 1, -2, 2, -3, 3}); };
TestCWiseGrad<float, float>(SIGN, x_fn);
}
TEST_F(CWiseUnaryGradTest, Sin) {
auto x_fn = [this](const int i) { return RV({0, -1, 1, -2, 2, -3, 3}); };
TestCWiseGrad<float, float>(SIN, x_fn);
}
TEST_F(CWiseUnaryGradTest, Sin_Complex) {
auto x_fn = [this](const int i) {
return CRV({{1, 0}, {0, 1}, {2, -1}, {1, 2}});
};
TestCWiseGrad<complex64, complex64>(SIN, x_fn);
}
TEST_F(CWiseUnaryGradTest, Cos) {
auto x_fn = [this](const int i) { return RV({0, -1, 1, -2, 2, -3, 3}); };
TestCWiseGrad<float, float>(COS, x_fn);
}
TEST_F(CWiseUnaryGradTest, Cos_Complex) {
auto x_fn = [this](const int i) {
return CRV({{1, 0}, {0, 1}, {2, -1}, {1, 2}});
};
TestCWiseGrad<complex64, complex64>(COS, x_fn);
}
TEST_F(CWiseUnaryGradTest, Asin) {
auto x_fn = [this](const int i) { return RV({0, 0.25, -0.25, -0.5, 0.5}); };
TestCWiseGrad<float, float>(ASIN, x_fn);
}
TEST_F(CWiseUnaryGradTest, Asin_Complex) {
auto x_fn = [this](const int i) {
return CRV({{0.5, 0}, {0, 0.5}, {0.25, -0.75}, {0.5, 0.25}});
};
// Disabled: the complex gradient check for Asin is intentionally skipped.
if (false) {
TestCWiseGrad<complex64, complex64>(ASIN, x_fn);
}
}
TEST_F(CWiseUnaryGradTest, Acos) {
auto x_fn = [this](const int i) { return RV({0, -0.5, 0.5, -0.75, 0.75}); };
TestCWiseGrad<float, float>(ACOS, x_fn);
}
TEST_F(CWiseUnaryGradTest, Acos_Complex) {
auto x_fn = [this](const int i) {
return CRV({{0.5, 0}, {0, 0.5}, {0.25, -0.75}, {0.5, 0.25}});
};
// Disabled: the complex gradient check for Acos is intentionally skipped.
if (false) {
TestCWiseGrad<complex64, complex64>(ACOS, x_fn);
}
}
TEST_F(CWiseUnaryGradTest, Tan) {
auto x_fn = [this](const int i) { return RV({0, -1, 1, -2, 2, -3, 3}); };
TestCWiseGrad<float, float>(TAN, x_fn);
}
TEST_F(CWiseUnaryGradTest, Tan_Complex) {
auto x_fn = [this](const int i) {
return CRV({{1, 0}, {0, 1}, {2, -1}, {1, 2}, {3, 4}});
};
TestCWiseGrad<complex64, complex64>(TAN, x_fn);
}
TEST_F(CWiseUnaryGradTest, Atan) {
auto x_fn = [this](const int i) { return RV({0, -1, 1, -2, 2, -3, 3}); };
TestCWiseGrad<float, float>(ATAN, x_fn);
}
TEST_F(CWiseUnaryGradTest, Atan_Complex) {
auto x_fn = [this](const int i) {
return CRV({{1, 0}, {0, 1}, {2, -1}, {1, 2}, {3, 4}});
};
// Disabled: the complex gradient check for Atan is intentionally skipped.
if (false) {
TestCWiseGrad<complex64, complex64>(ATAN, x_fn);
}
}
TEST_F(CWiseUnaryGradTest, Real) {
auto x_fn = [this](const int i) {
return CRV({{1, -1}, {-2, 2}, {2, 3}, {-2, -3}});
};
TestCWiseGrad<complex64, float>(REAL, x_fn);
}
TEST_F(CWiseUnaryGradTest, Imag) {
auto x_fn = [this](const int i) {
return CRV({{1, -1}, {-2, 2}, {2, 3}, {-2, -3}});
};
TestCWiseGrad<complex64, float>(IMAG, x_fn);
}
TEST_F(CWiseUnaryGradTest, Conj) {
auto x_fn = [this](const int i) {
return CRV({{1, -1}, {-2, 2}, {2, 3}, {-2, -3}});
};
TestCWiseGrad<complex64, complex64>(CONJ, x_fn);
}
TEST_F(CWiseUnaryGradTest, Complex) {
auto x_fn = [this](const int i) { return RV({1, -1, 2, -2, 3, -3}); };
TestCWiseGrad<float, complex64>(COMPLEX, x_fn);
}
TEST_F(CWiseUnaryGradTest, Angle) {
auto x_fn = [this](const int i) {
return CRV({{1.5, 1.5}, {1.5, -1.5}, {-1.5, 1.5}, {-1.5, -1.5}});
};
TestCWiseGrad<complex64, float>(ANGLE, x_fn);
}
TEST_F(CWiseUnaryGradTest, Lgamma) {
auto x_fn = [this](const int i) {
return RV({-3.5, -2.5, -1.5, 1.0, 2.0, 3.5});
};
TestCWiseGrad<float, float>(LGAMMA, x_fn);
}
TEST_F(CWiseUnaryGradTest, Lgamma_Complex) {
auto x_fn = [this](const int i) {
return CRV({{-3.5, 0.5}, {-1.5, -0.5}, {1.5, -1.0}, {3.5, 1.0}});
};
// Disabled: the complex gradient check for Lgamma is intentionally skipped.
if (false) {
TestCWiseGrad<complex64, complex64>(LGAMMA, x_fn);
}
}
TEST_F(CWiseUnaryGradTest, Erf) {
auto x_fn = [this](const int i) {
return RV({-1.2, -1.0, -0.5, 0.3, 0.5, 1.3});
};
TestCWiseGrad<float, float>(ERF, x_fn);
}
TEST_F(CWiseUnaryGradTest, Erf_Complex) {
auto x_fn = [this](const int i) {
return CRV({{-1.2, 0.5}, {-0.5, -0.5}, {0.5, 0.5}, {1.2, -0.5}});
};
// Disabled: the complex gradient check for Erf is intentionally skipped.
if (false) {
TestCWiseGrad<complex64, complex64>(ERF, x_fn);
}
}
TEST_F(CWiseUnaryGradTest, Ndtri) {
auto x_fn = [this](const int i) {
return RV({0.1, 0.2, 0.3, 0.5, 0.7, 0.9});
};
TestCWiseGrad<float, float>(NDTRI, x_fn);
}
TEST_F(CWiseUnaryGradTest, Erfinv) {
auto x_fn = [this](const int i) {
return RV({-0.9, -0.3, -0.1, 0.2, 0.6, 0.8});
};
TestCWiseGrad<float, float>(ERFINV, x_fn);
}
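// Exercises MatMul/BatchMatMul/BatchMatMulV3 gradients over random shapes
// for every transpose/adjoint combination.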
class MathGradTest : public ::testing::Test {
protected:
MathGradTest() : root_(Scope::NewRootScope().WithDevice("/cpu:0")) {}
template <typename T>
void TestMatMulGrad(const bool t_x, const bool t_y) {
TestMatMulGradHelper<T>(
false, false, t_x, t_y,
[&](Output x, Output y) {
return MatMul(root_, x, y, MatMul::TransposeA(t_x).TransposeB(t_y));
});
}
template <typename T>
void TestBatchMatMulGrad(const bool t_x, const bool t_y) {
TestMatMulGradHelper<T>(
true, true, t_x, t_y,
[&](Output x, Output y) {
return BatchMatMul(root_, x, y, BatchMatMul::AdjX(t_x).AdjY(t_y));
});
}
template <typename T>
void TestBatchMatMulV3Grad(const bool is_x_batch, const bool is_y_batch,
const bool t_x, const bool t_y) {
TestMatMulGradHelper<T>(
true, true, t_x, t_y,
[&](Output x, Output y) {
return BatchMatMulV3(root_, x, y, DataTypeToEnum<T>::v(),
BatchMatMulV3::AdjX(t_x).AdjY(t_y));
});
}
template <typename T>
void TestMatMulGradHelper(const bool is_x_batch, const bool is_y_batch,
const bool t_x, const bool t_y,
std::function<Output(Output, Output)> mul_fn) {
TF_ASSERT_OK(root_.status());
std::vector<TensorShape> shapes;
RandMatMulShapes(is_x_batch, is_y_batch, t_x, t_y, &shapes);
TensorShape x_shape = shapes[0];
TensorShape y_shape = shapes[1];
TensorShape z_shape = shapes[2];
auto x =
Placeholder(root_, DataTypeToEnum<T>::v(), Placeholder::Shape(x_shape));
auto y =
Placeholder(root_, DataTypeToEnum<T>::v(), Placeholder::Shape(y_shape));
Output z = mul_fn(x, y);
float max_error;
TF_ASSERT_OK((ComputeGradientError<T, T, float>(
root_, {x, y}, {x_shape, y_shape}, {z}, {z_shape}, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
void RandMatMulShapes(const bool is_x_batch, const bool is_y_batch,
const bool tx, const bool ty,
std::vector<TensorShape>* shapes) {
const int b = 1 + (random::New64() % 4);
const int m = Rand();
const int k = Rand();
const int n = Rand();
TensorShape x_shape;
if (is_x_batch) {
x_shape = tx ? TensorShape({b, k, m}) : TensorShape({b, m, k});
} else {
x_shape = tx ? TensorShape({k, m}) : TensorShape({m, k});
}
shapes->push_back(x_shape);
TensorShape y_shape;
if (is_y_batch) {
y_shape = ty ? TensorShape({b, n, k}) : TensorShape({b, k, n});
} else {
y_shape = ty ? TensorShape({n, k}) : TensorShape({k, n});
}
shapes->push_back(y_shape);
TensorShape z_shape;
if (is_x_batch || is_y_batch) {
z_shape = TensorShape({b, m, n});
} else {
z_shape = TensorShape({m, n});
}
shapes->push_back(z_shape);
}
int Rand() { return 1 + (random::New64() % 10); }
Scope root_;
};
TEST_F(MathGradTest, MatMulGrad_NoTranspose) {
TestMatMulGrad<float>(false, false);
}
TEST_F(MathGradTest, MatMulComplexGrad_NoTranspose) {
TestMatMulGrad<complex64>(false, false);
}
TEST_F(MathGradTest, MatMulGrad_TransposeX) {
TestMatMulGrad<float>(true, false);
}
TEST_F(MathGradTest, MatMulComplexGrad_TransposeX) {
TestMatMulGrad<complex64>(true, false);
}
TEST_F(MathGradTest, MatMulGrad_TransposeY) {
TestMatMulGrad<float>(false, true);
}
TEST_F(MathGradTest, MatMulComplexGrad_TransposeY) {
TestMatMulGrad<complex64>(false, true);
}
TEST_F(MathGradTest, MatMulGrad_TransposeX_TransposeY) {
TestMatMulGrad<float>(true, true);
}
TEST_F(MathGradTest, MatMulComplexGrad_TransposeX_TransposeY) {
TestMatMulGrad<complex64>(true, true);
}
TEST_F(MathGradTest, BatchMatMulGrad_NoTranspose) {
TestBatchMatMulGrad<float>(false, false);
}
TEST_F(MathGradTest, BatchMatMulComplexGrad_NoTranspose) {
TestBatchMatMulGrad<complex64>(false, false);
}
TEST_F(MathGradTest, BatchMatMulGrad_TransposeX) {
TestBatchMatMulGrad<float>(true, false);
}
TEST_F(MathGradTest, BatchMatMulComplexGrad_TransposeX) {
TestBatchMatMulGrad<complex64>(true, false);
}
TEST_F(MathGradTest, BatchMatMulGrad_TransposeY) {
TestBatchMatMulGrad<float>(false, true);
}
TEST_F(MathGradTest, BatchMatMulComplexGrad_TransposeY) {
TestBatchMatMulGrad<complex64>(false, true);
}
TEST_F(MathGradTest, BatchMatMulGrad_TransposeX_TransposeY) {
TestBatchMatMulGrad<float>(true, true);
}
TEST_F(MathGradTest, BatchMatMulComplexGrad_TransposeX_TransposeY) {
TestBatchMatMulGrad<complex64>(true, true);
}
TEST_F(MathGradTest, BatchMatMulV3Grad_BroadcastX) {
TestBatchMatMulV3Grad<float>(false, true, false, false);
}
TEST_F(MathGradTest, BatchMatMulV3Grad_BroadcastY) {
TestBatchMatMulV3Grad<float>(true, false, false, false);
}
TEST_F(MathGradTest, BatchMatMulV3Grad_BroadcastYTransposeY) {
TestBatchMatMulV3Grad<float>(true, false, false, true);
}
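// Harness for n-ary op gradients; both RunTest overloads delegate to the
// numeric gradient checker and assert the max error stays below 1e-3.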
class NaryGradTest : public ::testing::Test {
protected:
NaryGradTest() : scope_(Scope::NewRootScope().WithDevice("/cpu:0")) {}
void RunTest(const OutputList& xs, const std::vector<TensorShape>& x_shapes,
const OutputList& ys, const std::vector<TensorShape>& y_shapes) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, xs, x_shapes, ys, y_shapes, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
void RunTest(const Output& x, const Tensor& x_init_value, const Output& y,
const TensorShape& y_shape) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, x, x_init_value, y, y_shape, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
Scope scope_;
};
TEST_F(NaryGradTest, Sum) {
TensorShape x_shape({2, 3, 5, 7});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Sum(scope_, x, {1, -1});
TensorShape y_shape({2, 5});
RunTest({x}, {x_shape}, {y}, {y_shape});
}
TEST_F(NaryGradTest, Mean) {
TensorShape x_shape({2, 3, 5, 7});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Mean(scope_, x, {1, -1});
TensorShape y_shape({2, 5});
RunTest({x}, {x_shape}, {y}, {y_shape});
}
TEST_F(NaryGradTest, Min) {
TensorShape x_shape({2, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Min(scope_, x, {-1});
TensorShape y_shape({2});
Tensor x_init_value =
test::AsTensor<float>({0.5f, 0.7f, 0.2f, 1.0f, 1.5f, -2.8f}, x_shape);
RunTest(x, x_init_value, y, y_shape);
}
TEST_F(NaryGradTest, Max) {
TensorShape x_shape({2, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Max(scope_, x, {-1});
TensorShape y_shape({2});
Tensor x_init_value =
test::AsTensor<float>({0.5f, 0.7f, 0.2f, 1.0f, 1.5f, -2.8f}, x_shape);
RunTest(x, x_init_value, y, y_shape);
}
TEST_F(NaryGradTest, MinMulti) {
TensorShape x_shape({1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto all_same = Mul(scope_, Const(scope_, {1.f, 1.f, 1.f}), x);
auto y = Min(scope_, all_same, {0});
TensorShape y_shape({1});
RunTest({x}, {x_shape}, {y}, {y_shape});
}
TEST_F(NaryGradTest, MaxMulti) {
TensorShape x_shape({1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto all_same = Mul(scope_, Const(scope_, {1.f, 1.f, 1.f}), x);
auto y = Max(scope_, all_same, {0});
TensorShape y_shape({1});
RunTest({x}, {x_shape}, {y}, {y_shape});
}
TEST_F(NaryGradTest, AddN) {
TensorShape shape({3, 2, 5});
std::vector<Output> xs;
xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)));
xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)));
xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)));
auto y = AddN(scope_, xs);
RunTest(xs, {shape, shape, shape}, {y}, {shape});
}
TEST_F(NaryGradTest, Add) {
TensorShape x1_shape({3, 2, 5});
TensorShape x2_shape({2, 5});
auto x1 = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x1_shape));
auto x2 = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x2_shape));
auto y = Add(scope_, x1, x2);
RunTest({x1, x2}, {x1_shape, x2_shape}, {y}, {x1_shape});
}
TEST_F(NaryGradTest, AddV2) {
TensorShape x1_shape({3, 2, 5});
TensorShape x2_shape({2, 5});
auto x1 = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x1_shape));
auto x2 = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x2_shape));
auto y = AddV2(scope_, x1, x2);
RunTest({x1, x2}, {x1_shape, x2_shape}, {y}, {x1_shape});
}
TEST_F(NaryGradTest, Sub) {
TensorShape x1_shape({3, 2, 5});
TensorShape x2_shape({2, 5});
auto x1 = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x1_shape));
auto x2 = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x2_shape));
auto y = Sub(scope_, x1, x2);
RunTest({x1, x2}, {x1_shape, x2_shape}, {y}, {x1_shape});
}
TEST_F(NaryGradTest, Mul) {
TensorShape x1_shape({3, 2, 5});
TensorShape x2_shape({2, 5});
auto x1 = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x1_shape));
auto x2 = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x2_shape));
auto y = Mul(scope_, x1, x2);
RunTest({x1, x2}, {x1_shape, x2_shape}, {y}, {x1_shape});
}
TEST_F(NaryGradTest, Div) {
TensorShape x_shape({3, 2, 5});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Div(scope_, x, Add(scope_, Const<float>(scope_, 1), Abs(scope_, x)));
RunTest({x}, {x_shape}, {y}, {x_shape});
}
TEST_F(NaryGradTest, RealDiv) {
TensorShape x_shape({3, 2, 5});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y =
RealDiv(scope_, x, Add(scope_, Const<float>(scope_, 1), Abs(scope_, x)));
RunTest({x}, {x_shape}, {y}, {x_shape});
}
TEST_F(NaryGradTest, DivNoNan) {
{
TensorShape x_shape({3, 2, 5});
const auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
const auto y = DivNoNan(
scope_, x, Add(scope_, Const<float>(scope_, 1), Abs(scope_, x)));
RunTest({x}, {x_shape}, {y}, {x_shape});
}
{
const auto x = Placeholder(scope_, DT_FLOAT);
const auto zero = Const<float>(scope_, 0.0);
const auto y = DivNoNan(scope_, x, zero);
std::vector<Output> grad_outputs;
TF_EXPECT_OK(AddSymbolicGradients(scope_, {y}, {x}, &grad_outputs));
ClientSession session(scope_);
std::vector<Tensor> grad_result;
TF_EXPECT_OK(
session.Run({{x, {-3.0f, 0.0f, 3.0f}}}, grad_outputs, &grad_result));
EXPECT_EQ(grad_result.size(), 1);
EXPECT_EQ(grad_result[0].NumElements(), 3);
EXPECT_EQ(grad_result[0].flat<float>()(0), 0.0f);
EXPECT_EQ(grad_result[0].flat<float>()(1), 0.0f);
EXPECT_EQ(grad_result[0].flat<float>()(2), 0.0f);
}
}
TEST_F(NaryGradTest, SquaredDifference) {
TensorShape x1_shape({3, 2, 5});
TensorShape x2_shape({2, 5});
auto x1 = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x1_shape));
auto x2 = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x2_shape));
auto y = SquaredDifference(scope_, x1, x2);
RunTest({x1, x2}, {x1_shape, x2_shape}, {y}, {x1_shape});
}
TEST_F(NaryGradTest, Pow) {
TensorShape shape({3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Pow(scope_, x, Const(scope_, {1.f, 2.f, 3.f}));
RunTest({x}, {shape}, {y}, {shape});
}
TEST_F(NaryGradTest, Maximum) {
TensorShape shape({3, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Maximum(scope_, x, Const(scope_, 1.0f));
Tensor x_init_value =
test::AsTensor<float>({0.5f, 1.5f, -1.2f, 3.0f, 0.1f, 2.8f}, {3, 2});
RunTest(x, x_init_value, y, shape);
}
TEST_F(NaryGradTest, Minimum) {
TensorShape shape({3, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Minimum(scope_, x, Const(scope_, 1.0f));
Tensor x_init_value =
test::AsTensor<float>({0.5f, 1.5f, -1.2f, 3.0f, 0.1f, 2.8f}, {3, 2});
RunTest(x, x_init_value, y, shape);
}
TEST_F(NaryGradTest, Prod) {
TensorShape x_shape({2, 3, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Prod(scope_, x, {1});
TensorShape y_shape({2, 1, 2});
RunTest({x}, {x_shape}, {y}, {y_shape});
}
TEST_F(NaryGradTest, SegmentSum) {
TensorShape x_shape({3, 4});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = SegmentSum(scope_, x, {0, 0, 1});
TensorShape y_shape({2, 4});
RunTest({x}, {x_shape}, {y}, {y_shape});
}
class CumsumGradTest
: public NaryGradTest,
public ::testing::WithParamInterface<std::tuple<bool, bool, int>> {};
TEST_P(CumsumGradTest, CumsumGrad) {
int axis = std::get<2>(GetParam());
TensorShape shape({2, 3, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
Cumsum::Attrs attrs;
attrs.exclusive_ = std::get<0>(GetParam());
attrs.reverse_ = std::get<1>(GetParam());
auto y = Cumsum(scope_, x, axis, attrs);
RunTest({x}, {shape}, {y}, {shape});
}
INSTANTIATE_TEST_SUITE_P(CumsumGrad, CumsumGradTest,
::testing::Combine(::testing::Bool(),
::testing::Bool(),
::testing::Range(0, 2)));
TEST_F(NaryGradTest, CastGrad) {
TensorShape shape({2, 3, 2});
auto x = Placeholder(scope_, DT_DOUBLE, Placeholder::Shape(shape));
auto y = Cast(scope_, x, DT_FLOAT);
TF_ASSERT_OK(scope_.status());
double max_error;
TF_ASSERT_OK((ComputeGradientError<double, float, double>(
scope_, {x}, {shape}, {y}, {shape}, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
TEST_F(NaryGradTest, Select) {
TensorShape shape({1, 3});
auto cond = Const<bool>(scope_, {{false, true, true}});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto z = Where3(scope_, cond, x, y);
RunTest({x, y}, {shape, shape}, {z}, {shape});
}
TEST_F(NaryGradTest, SelectV2_Basic) {
TensorShape shape({1, 3});
auto cond = Const<bool>(scope_, {{false, true, true}});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto z = SelectV2(scope_, cond, x, y);
RunTest({x, y}, {shape, shape}, {z}, {shape});
}
TEST_F(NaryGradTest, SelectV2 |
1,263 | cpp | tensorflow/tensorflow | array_grad | tensorflow/cc/gradients/array_grad.cc | tensorflow/cc/gradients/array_grad_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_GRADIENTS_ARRAY_GRAD_H_
#define TENSORFLOW_C_EXPERIMENTAL_GRADIENTS_ARRAY_GRAD_H_
#include "tensorflow/c/eager/gradients.h"
namespace tensorflow {
namespace gradients {
GradientFunction* IdentityNRegisterer(const ForwardOperation& op);
}
}
#endif
#include <vector>
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/ops/array_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
namespace ops {
namespace {
REGISTER_NO_GRADIENT_OP("Const");
REGISTER_NO_GRADIENT_OP("StopGradient");
REGISTER_NO_GRADIENT_OP("ConcatOffset");
REGISTER_NO_GRADIENT_OP("EditDistance");
REGISTER_NO_GRADIENT_OP("ZerosLike");
REGISTER_NO_GRADIENT_OP("InvertPermutation");
REGISTER_NO_GRADIENT_OP("Shape");
REGISTER_NO_GRADIENT_OP("ShapeN");
REGISTER_NO_GRADIENT_OP("Rank");
REGISTER_NO_GRADIENT_OP("Size");
REGISTER_NO_GRADIENT_OP("BroadcastGradientArgs");
REGISTER_NO_GRADIENT_OP("OneHot");
Status PackGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
int N;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "N", &N));
int axis;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "axis", &axis));
grad_outputs->reserve(N);
auto grad_op = Unstack(scope, grad_inputs[0], N, Unstack::Axis(axis));
for (const Output& o : grad_op.output) {
grad_outputs->emplace_back(o);
}
return scope.status();
}
REGISTER_GRADIENT_OP("Pack", PackGrad);
Status UnpackGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
int axis;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "axis", &axis));
grad_outputs->push_back(Stack(scope, grad_inputs, Stack::Axis(axis)));
return scope.status();
}
REGISTER_GRADIENT_OP("Unpack", UnpackGrad);
Status IdentityGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(Identity(scope, grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("Identity", IdentityGrad);
Status RefIdentityGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(Identity(scope, grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("RefIdentity", RefIdentityGrad);
Status QuantizeAndDequantizeGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(Identity(scope, grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("QuantizeAndDequantize", QuantizeAndDequantizeGrad);
Status QuantizeAndDequantizeV4GradHelper(const Scope& scope,
const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
Input input = Shape(scope, op.input(0));
Input input_min = op.input(1);
Input input_max = op.input(2);
int64_t axis;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "axis", &axis));
auto qdq_v4_grad = QuantizeAndDequantizeV4Grad(
scope, grad_inputs[0], input, input_min, input_max,
QuantizeAndDequantizeV4Grad::Axis(axis));
grad_outputs->push_back(qdq_v4_grad.input_backprop);
grad_outputs->push_back(qdq_v4_grad.input_min_backprop);
grad_outputs->push_back(qdq_v4_grad.input_max_backprop);
return scope.status();
}
REGISTER_GRADIENT_OP("QuantizeAndDequantizeV4",
QuantizeAndDequantizeV4GradHelper);
Status QuantizeAndDequantizeV3Grad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(Identity(scope, grad_inputs[0]));
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("QuantizeAndDequantizeV3", QuantizeAndDequantizeV3Grad);
Status SplitGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(Concat(scope, grad_inputs, op.input(0)));
return scope.status();
}
REGISTER_GRADIENT_OP("Split", SplitGrad);
Status SplitVGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
if (op.num_inputs() < 3) {
return errors::InvalidArgument("SplitV requires 3 arguments");
}
grad_outputs->push_back(Concat(scope, grad_inputs, op.input(2)));
for (int i = 0; i < op.num_inputs() - 1; ++i) {
grad_outputs->push_back(NoGradient());
}
return scope.status();
}
REGISTER_GRADIENT_OP("SplitV", SplitVGrad);
Status FillGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(NoGradient());
auto all_dims = Range(scope, Const(scope, 0), Rank(scope, grad_inputs[0]),
Const(scope, 1));
grad_outputs->push_back(ReduceSum(scope, grad_inputs[0], all_dims));
return scope.status();
}
REGISTER_GRADIENT_OP("Fill", FillGrad);
Status DiagGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(DiagPart(scope, grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("Diag", DiagGrad);
Status DiagPartGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(Diag(scope, grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("DiagPart", DiagPartGrad);
Status MatrixDiagGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(MatrixDiagPart(scope, grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("MatrixDiag", MatrixDiagGrad);
Status MatrixBandPartGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto num_lower = op.input(1);
auto num_upper = op.input(2);
grad_outputs->push_back(
MatrixBandPart(scope, grad_inputs[0], num_lower, num_upper));
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("MatrixBandPart", MatrixBandPartGrad);
Status GatherNdGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto ref = op.input(0);
auto indices = op.input(1);
Shape::Attrs shape_attrs;
shape_attrs.out_type_ = indices.type();
auto ref_shape = Shape(scope, ref, shape_attrs);
grad_outputs->push_back(ScatterNd(scope, indices, grad_inputs[0], ref_shape));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("GatherNd", GatherNdGrad);
Status CheckNumericsGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
string message;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "message", &message));
string err_msg = strings::StrCat(
"Not a number (NaN) or infinity (Inf) values detected in gradient. ",
message);
grad_outputs->push_back(CheckNumerics(scope, grad_inputs[0], err_msg));
return scope.status();
}
REGISTER_GRADIENT_OP("CheckNumerics", CheckNumericsGrad);
Status ReshapeGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto input_shape = Shape(scope, op.input(0));
grad_outputs->push_back(Reshape(scope, grad_inputs[0], input_shape));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("Reshape", ReshapeGrad);
Status ExpandDimsGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto input_shape = Shape(scope, op.input(0));
grad_outputs->push_back(Reshape(scope, grad_inputs[0], input_shape));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("ExpandDims", ExpandDimsGrad);
Status SqueezeGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto input_shape = Shape(scope, op.input(0));
grad_outputs->push_back(Reshape(scope, grad_inputs[0], input_shape));
return scope.status();
}
REGISTER_GRADIENT_OP("Squeeze", SqueezeGrad);
Status TransposeGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto inverted_perm = InvertPermutation(scope, op.input(1));
grad_outputs->push_back(Transpose(scope, grad_inputs[0], inverted_perm));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("Transpose", TransposeGrad);
Status ReverseSequenceGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto seq_lengths = op.input(1);
int batch_dim;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "batch_dim", &batch_dim));
int seq_dim;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "seq_dim", &seq_dim));
grad_outputs->push_back(
ReverseSequence(scope, grad_inputs[0], seq_lengths, seq_dim,
ReverseSequence::BatchDim(batch_dim)));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("ReverseSequence", ReverseSequenceGrad);
Status ReverseGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto reverse_dims = op.input(1);
grad_outputs->push_back(Reverse(scope, grad_inputs[0], reverse_dims));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("ReverseV2", ReverseGrad);
Status ScatterNdGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto indices = op.input(0);
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(GatherNd(scope, grad_inputs[0], indices));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("ScatterNd", ScatterNdGrad);
Status ScatterNdNonAliasingAddGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto indices = op.input(1);
grad_outputs->push_back(Identity(scope, grad_inputs[0]));
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(GatherNd(scope, grad_inputs[0], indices));
return scope.status();
}
REGISTER_GRADIENT_OP("ScatterNdNonAliasingAdd", ScatterNdNonAliasingAddGrad);
template <bool IsPadV2>
Status PadGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto x = op.input(0);
auto a = op.input(1);
auto size = Stack(scope, {Rank(scope, x), 1});
auto pad_before = Slice(scope, a, {0, 0}, size);
auto begin = Reshape(scope, pad_before, {-1});
grad_outputs->push_back(Slice(scope, grad_inputs[0], begin, Shape(scope, x)));
grad_outputs->push_back(NoGradient());
if (IsPadV2) {
grad_outputs->push_back(NoGradient());
}
return scope.status();
}
REGISTER_GRADIENT_OP("Pad", PadGrad<false>);
REGISTER_GRADIENT_OP("PadV2", PadGrad<true>);
Status SpaceToBatchGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
int block_size;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "block_size", &block_size));
grad_outputs->push_back(
BatchToSpace(scope, grad_inputs[0], op.input(1), block_size));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("SpaceToBatch", SpaceToBatchGrad);
Status SpaceToBatchNDGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(
BatchToSpaceND(scope, grad_inputs[0], op.input(1), op.input(2)));
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("SpaceToBatchND", SpaceToBatchNDGrad);
Status BatchToSpaceGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
int block_size;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "block_size", &block_size));
grad_outputs->push_back(
SpaceToBatch(scope, grad_inputs[0], op.input(1), block_size));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("BatchToSpace", BatchToSpaceGrad);
Status BatchToSpaceNDGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(
SpaceToBatchND(scope, grad_inputs[0], op.input(1), op.input(2)));
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("BatchToSpaceND", BatchToSpaceNDGrad);
Status SpaceToDepthGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
int block_size;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "block_size", &block_size));
grad_outputs->push_back(DepthToSpace(scope, grad_inputs[0], block_size));
return scope.status();
}
REGISTER_GRADIENT_OP("SpaceToDepth", SpaceToDepthGrad);
Status DepthToSpaceGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
int block_size;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "block_size", &block_size));
grad_outputs->push_back(SpaceToDepth(scope, grad_inputs[0], block_size));
return scope.status();
}
REGISTER_GRADIENT_OP("DepthToSpace", DepthToSpaceGrad);
Status MirrorPadGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
string mode;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "mode", &mode));
grad_outputs->push_back(tensorflow::ops::internal::MirrorPadGrad(
scope, grad_inputs[0], op.input(1), mode));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("MirrorPad", MirrorPadGrad);
Status MirrorPadGradGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
string mode;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "mode", &mode));
grad_outputs->push_back(MirrorPad(scope, grad_inputs[0], op.input(1), mode));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("MirrorPadGrad", MirrorPadGradGrad);
Status StridedSliceGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
Input x = Shape(scope, op.input(0));
Input begin = op.input(1);
Input end = op.input(2);
Input strides = op.input(3);
int64_t begin_mask;
int64_t end_mask;
int64_t ellipsis_mask;
int64_t new_axis_mask;
int64_t shrink_axis_mask;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "begin_mask", &begin_mask));
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "end_mask", &end_mask));
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "ellipsis_mask", &ellipsis_mask));
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "new_axis_mask", &new_axis_mask));
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "shrink_axis_mask", &shrink_axis_mask));
grad_outputs->push_back(
StridedSliceGrad(scope, x, begin, end, strides, grad_inputs[0],
StridedSliceGrad::BeginMask(begin_mask)
.EndMask(end_mask)
.EllipsisMask(ellipsis_mask)
.NewAxisMask(new_axis_mask)
.ShrinkAxisMask(shrink_axis_mask)));
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("StridedSlice", StridedSliceGradHelper);
Status SliceGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
Input input = op.input(0);
Input begin = op.input(1);
auto input_rank = Rank(scope, input);
auto slice_size = Shape(scope, op.output(0));
auto padding_shape = Stack(scope, {input_rank, 1});
Input before_padding = Reshape(scope, begin, padding_shape);
auto after_padding_sizes =
Sub(scope, Sub(scope, Shape(scope, input), slice_size), begin);
Input after_padding = Reshape(scope, after_padding_sizes, padding_shape);
auto paddings =
Concat(scope, {before_padding, after_padding}, Const(scope, 1));
grad_outputs->push_back(Pad(scope, grad_inputs[0], paddings));
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("Slice", SliceGrad);
Status ConcatGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs,
int start_value_index, int end_value_index,
int dim_index) {
if (end_value_index >= op.num_inputs()) {
return errors::Internal("Invalid input index");
}
std::vector<Output> inputs;
inputs.reserve(end_value_index - start_value_index);
for (int i = start_value_index; i < end_value_index; ++i) {
inputs.push_back(op.input(i));
}
auto shapes = ShapeN(scope, inputs);
const auto unique_name = scope.GetUniqueNameForOp("ConcatOffset");
auto builder =
::tensorflow::NodeBuilder(unique_name, "ConcatOffset")
.Input(::tensorflow::ops::AsNodeOut(scope, op.input(dim_index)))
.Input(::tensorflow::ops::AsNodeOutList(scope, shapes.output));
scope.UpdateBuilder(&builder);
::tensorflow::Node* concat_offset_node;
scope.UpdateStatus(builder.Finalize(scope.graph(), &concat_offset_node));
scope.UpdateStatus(scope.DoShapeInference(concat_offset_node));
if (concat_offset_node->num_outputs() != inputs.size()) {
return errors::Internal("ConcatOffset has invalid output count");
}
if (grad_inputs.size() != 1) {
return errors::InvalidArgument("Concat grad should have 1 input");
}
const Output& dy = grad_inputs[0];
for (int i = 0; i < inputs.size(); ++i) {
grad_outputs->push_back(
Slice(scope, dy, Output(concat_offset_node, i), shapes.output[i]));
}
grad_outputs->insert(grad_outputs->begin() + dim_index, NoGradient());
return scope.status();
}
Status ConcatV2Grad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
return ConcatGradHelper(scope, op, grad_inputs, grad_outputs,
0,
op.num_inputs() - 1,
op.num_inputs() - 1);
}
REGISTER_GRADIENT_OP("ConcatV2", ConcatV2Grad);
Status BroadcastToGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
if (grad_inputs.size() != 1) {
return errors::InvalidArgument("BroadcastTo grad should have 1 grad input");
}
if (op.num_inputs() != 2) {
return errors::InvalidArgument("BroadcastTo requires 2 inputs");
}
auto x_shape = Shape(scope, op.input(0));
auto args = internal::BroadcastGradientArgs(scope, x_shape, op.input(1));
auto sum_gx = Sum(scope, grad_inputs[0], args.r0);
grad_outputs->push_back(Reshape(scope, sum_gx, x_shape));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("BroadcastTo", BroadcastToGrad);
Status TileGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
if (op.num_inputs() != 2) {
return errors::InvalidArgument("Tile requires 2 inputs");
}
if (grad_inputs.size() != 1) {
return errors::InvalidArgument("Tile grad requires 1 grad input");
}
Shape::Attrs shape_attrs;
shape_attrs.out_type_ = op.input_type(1);
auto input_shape = Shape(scope, op.input(0), shape_attrs);
auto stack = Stack(scope, {op.input(1), input_shape.output});
auto perm = Range(scope, Sub(scope, Rank(scope, stack), 1), -1, -1);
auto split_shape = Reshape(scope, Transpose(scope, stack, perm), {-1});
auto axes = Range(scope, Const(scope, 0), Size(scope, split_shape.output), 2);
auto input_grad = ReduceSum(
scope, Reshape(scope, grad_inputs[0], split_shape.output), axes.output);
grad_outputs->push_back(input_grad.output);
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("Tile", TileGrad);
Output ConstHelper(const Scope& scope, int value, DataType d_type) {
return Cast(scope, Const(scope, value), d_type);
}
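// Converts batch-relative indices into indices into params with the leading
// batch_dims dimensions flattened away: for each batch dimension, an offset
// of (position along that dimension) * (product of the following dimension
// sizes up to the gather dimension) is broadcast and added to the indices.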
Output GetBatchIndices(const Scope& scope, const Output& params_shape,
const Output& indices, int batch_dims) {
Output batch_indices = indices;
auto indices_ndims = Rank(scope, indices);
auto casted_params_shape = Cast(scope, params_shape, indices.type());
Output accum_dim_value = ConstHelper(scope, 1, indices.type());
for (int dim = batch_dims; dim > 0; dim--) {
Output dim_value = Slice(scope, casted_params_shape, {dim - 1}, {1});
accum_dim_value = Multiply(scope, accum_dim_value,
Slice(scope, casted_params_shape, {dim}, {1}));
auto start = ConstHelper(scope, 0, indices.type());
auto step = ConstHelper(scope, 1, indices.type());
Output dim_indices = Range(scope, start, Squeeze(scope, dim_value), step);
dim_indices = Multiply(scope, dim_indices, accum_dim_value);
auto one = Cast(scope, Const(scope, {1}), indices.type());
auto dim_shape = Concat(
scope,
{Output(Tile(scope, one, Const(scope, {dim - 1}))), dim_value,
Output(Tile(scope, one,
ExpandDims(scope, Sub(scope, indices_ndims, dim), 0)))},
0);
batch_indices =
Add(scope, batch_indices, Reshape(scope, dim_indices, dim_shape));
}
return batch_indices;
}
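// Scatter-adds `values` (the incoming gradient) back into a tensor of
// `gather_dim_size` segments with UnsortedSegmentSum. When batch_dims > 0,
// the batch dimensions are first folded into the gather dimension via
// GetBatchIndices so a single flat scatter covers all batches, and the
// result is reshaped back to the params shape afterwards.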
Output BatchGatherGrad(const Scope& scope, Output params_shape, Output values,
Output indices, int batch_dims, Output gather_dim_size) {
auto indices_size = ExpandDims(scope, Size(scope, indices), 0);
Output outer_shape, flat_values_shape;
if (batch_dims != 0) {
auto values_shape = Shape(scope, values);
outer_shape = Slice(scope, values_shape, {0}, {batch_dims});
auto inner_shape =
Slice(scope, Slice(scope, values_shape, {batch_dims}, {-1}), {1}, {-1});
auto batch_size = Prod(scope, outer_shape, 0);
flat_values_shape = Concat(scope, {{-1}, inner_shape}, 0);
gather_dim_size = Multiply(scope, gather_dim_size, batch_size);
indices = GetBatchIndices(scope, params_shape, indices, batch_dims);
values = Reshape(scope, values, flat_values_shape);
}
indices = Reshape(scope, indices, indices_size);
Output params_grad =
UnsortedSegmentSum(scope, values, indices, gather_dim_size);
if (batch_dims != 0) {
params_grad = Reshape(scope, params_grad, params_shape);
}
return params_grad;
}
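// GatherV2's gradient for an arbitrary axis: dy is reshaped to expose the
// gathered axis, that axis is transposed to just after the batch dimensions,
// and BatchGatherGrad performs the scatter-add; params_shape is permuted with
// the same transpose so shapes stay consistent. Negative batch_dims is
// rejected, as this implementation does not support it.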
Status GatherV2Grad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
if (op.num_inputs() != 3) {
return errors::InvalidArgument("Gather requires 3 inputs");
}
if (grad_inputs.size() != 1) {
return errors::InvalidArgument("Gather grad requires 1 grad input");
}
auto params = op.input(0);
auto colocate_scope = scope.ColocateWith(params);
Shape::Attrs shape_attrs;
shape_attrs.out_type_ = DT_INT64;
auto params_shape64 = Shape(colocate_scope, params, shape_attrs);
Output params_shape = Cast(colocate_scope, params_shape64, DT_INT32);
auto indices = op.input(1);
auto indices_size = ExpandDims(scope, Size(scope, indices), 0);
auto axis = op.input(2);
auto axis_expand = ExpandDims(scope, axis, 0);
int batch_dims;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "batch_dims", &batch_dims));
if (batch_dims < 0) {
return errors::InvalidArgument(
"C++ GatherV2 gradient does not support negative batch_dims.");
}
auto outer_shape = Slice(scope, params_shape, {0}, axis_expand);
auto inner_shape =
Slice(scope, Slice(scope, params_shape, axis_expand, {-1}), {1}, {-1});
auto values_shape = Concat(scope, {outer_shape, {-1}, inner_shape}, 0);
auto values_dims = Size(scope, values_shape);
auto axis_dims = Size(scope, outer_shape);
Output outer_batches_indices = Range(scope, 0, batch_dims, 1);
Output batch_axis_indices = Range(scope, batch_dims, axis_dims, 1);
Output inner_axes_indices =
Range(scope, Add(scope, axis_dims, 1), values_dims, 1);
Output axis_dims_expand = ExpandDims(scope, axis_dims, 0);
auto values = Reshape(scope, grad_inputs[0], values_shape);
Output transpose_dims = Concat(scope,
{outer_batches_indices, axis_dims_expand,
batch_axis_indices, inner_axes_indices},
0);
auto values_transpose = Transpose(scope, values, transpose_dims);
Output gather_dim_size =
Squeeze(scope, Slice(scope, params_shape, axis_expand, {1}));
params_shape = Gather(scope, params_shape, transpose_dims);
auto params_grad = BatchGatherGrad(scope, params_shape, values_transpose,
indices, batch_dims, gather_dim_size); | #include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/array_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
using namespace ops;
using ops::internal::MirrorPadGrad;
class ArrayGradTest : public ::testing::Test {
protected:
ArrayGradTest() : scope_(Scope::NewRootScope()) {}
void RunTest(const Output& x, const TensorShape& x_shape, const Output& y,
const TensorShape& y_shape) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, {x}, {x_shape}, {y}, {y_shape}, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
void RunTest(const OutputList& xs, const std::vector<TensorShape>& x_shapes,
const OutputList& ys, const std::vector<TensorShape>& y_shapes) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, xs, x_shapes, ys, y_shapes, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
Scope scope_;
};
TEST_F(ArrayGradTest, StackGrad_Axis0) {
TensorShape x_shape({1, 2, 3});
std::vector<Output> xs;
xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape)));
xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape)));
auto y = Stack(scope_, xs, Stack::Axis(0));
TensorShape y_shape({2, 1, 2, 3});
RunTest(xs, {x_shape, x_shape}, {y}, {y_shape});
}
TEST_F(ArrayGradTest, StackGrad_Axis1) {
TensorShape x_shape({1, 2, 3});
std::vector<Output> xs;
xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape)));
xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape)));
auto y = Stack(scope_, xs, Stack::Axis(1));
TensorShape y_shape({1, 2, 2, 3});
RunTest(xs, {x_shape, x_shape}, {y}, {y_shape});
}
TEST_F(ArrayGradTest, UnstackGrad_Axis0) {
TensorShape x_shape({4, 2, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
std::vector<TensorShape> y_shapes(4, TensorShape({2, 3}));
auto y = Unstack(scope_, x, 4, Unstack::Axis(0));
RunTest({x}, {x_shape}, y.output, y_shapes);
}
TEST_F(ArrayGradTest, UnstackGrad_Axis1) {
TensorShape x_shape({4, 2, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
std::vector<TensorShape> y_shapes(2, TensorShape({4, 3}));
auto y = Unstack(scope_, x, 2, Unstack::Axis(1));
RunTest({x}, {x_shape}, y.output, y_shapes);
}
TEST_F(ArrayGradTest, IdentityGrad) {
TensorShape shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Identity(scope_, x);
RunTest(x, shape, y, shape);
}
TEST_F(ArrayGradTest, SplitGrad) {
TensorShape x_shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto split_dim = Const(scope_, 1, {});
auto y = Split(scope_, split_dim, x, 2);
TensorShape y_shape = TensorShape({5, 1});
RunTest({x}, {x_shape}, y.output, {y_shape, y_shape});
}
TEST_F(ArrayGradTest, SplitVGrad) {
TensorShape x_shape({2, 6});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = SplitV(scope_, x, {1, 2, 3}, 1, 3);
RunTest({x}, {x_shape}, y.output,
{TensorShape({2, 1}), TensorShape({2, 2}), TensorShape({2, 3})});
}
TEST_F(ArrayGradTest, FillGrad) {
TensorShape x_shape({});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
TensorShape y_shape({2, 5, 3});
auto y = Fill(scope_, {2, 5, 3}, x);
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, DiagGrad) {
TensorShape x_shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Diag(scope_, x);
TensorShape y_shape({5, 2, 5, 2});
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, DiagPartGrad) {
TensorShape x_shape({5, 2, 5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = DiagPart(scope_, x);
TensorShape y_shape({5, 2});
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, MatrixDiagGrad) {
TensorShape x_shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = MatrixDiag(scope_, x);
TensorShape y_shape({5, 2, 2});
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, MatrixBandPartGrad) {
TensorShape shape({5, 5});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
const int64_t num_lower = 1;
const int64_t num_upper = 2;
auto y = MatrixBandPart(scope_, x, num_lower, num_upper);
RunTest(x, shape, y, shape);
}
TEST_F(ArrayGradTest, GatherNdGrad_SimpleIndexing) {
TensorShape x_shape({2, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto indices = Const(scope_, {{0, 0}, {1, 1}});
TensorShape y_shape({2});
auto y = GatherNd(scope_, x, indices);
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherNdGrad_SliceIndexing) {
TensorShape shape({2, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto indices = Const(scope_, {{1}, {0}});
auto y = GatherNd(scope_, x, indices);
RunTest(x, shape, y, shape);
}
TEST_F(ArrayGradTest, GatherNdGrad_SliceIndexing_Int64) {
TensorShape shape({2, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto indices = Cast(scope_, Const(scope_, {{1}, {0}}), DT_INT64);
auto y = GatherNd(scope_, x, indices);
RunTest(x, shape, y, shape);
}
TEST_F(ArrayGradTest, CheckNumericsGrad) {
TensorShape shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = CheckNumerics(scope_, x, "CheckNumerics failed");
RunTest(x, shape, y, shape);
}
TEST_F(ArrayGradTest, ReshapeGrad) {
TensorShape x_shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
TensorShape y_shape({2, 5});
auto y = Reshape(scope_, x, {2, 5});
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, ExpandDimsGrad) {
TensorShape x_shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
TensorShape y_shape({1, 5, 2});
auto y = ExpandDims(scope_, x, 0);
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, SqueezeGrad) {
TensorShape x_shape({1, 5, 1, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
TensorShape y_shape({5, 2});
auto y = Squeeze(scope_, x);
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, TransposeGrad) {
TensorShape x_shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
TensorShape y_shape({2, 5});
auto y = Transpose(scope_, x, {1, 0});
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, ReverseSequenceGrad) {
TensorShape shape({5, 2, 5});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto seq_lengths = Const(scope_, {1, 2, 3, 4, 5});
auto y = ReverseSequence(scope_, x, seq_lengths, 2);
RunTest(x, shape, y, shape);
}
TEST_F(ArrayGradTest, ReverseGrad) {
TensorShape shape({5, 2, 5});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Reverse(scope_, x, {0, 2});
RunTest(x, shape, y, shape);
}
TEST_F(ArrayGradTest, ScatterNdGrad_SimpleIndexing) {
TensorShape updates_shape({4});
auto updates =
Placeholder(scope_, DT_FLOAT, Placeholder::Shape(updates_shape));
auto indices = Const(scope_, {{4}, {3}, {1}, {7}});
TensorShape y_shape({8});
auto y = ScatterNd(scope_, indices, updates, {8});
RunTest(updates, updates_shape, y, y_shape);
}
TEST_F(ArrayGradTest, ScatterNdGrad_SliceIndexing) {
TensorShape updates_shape({2, 4, 4});
auto updates =
Placeholder(scope_, DT_FLOAT, Placeholder::Shape(updates_shape));
auto indices = Const(scope_, {{0}, {2}});
TensorShape y_shape({4, 4, 4});
auto y = ScatterNd(scope_, indices, updates, {4, 4, 4});
RunTest(updates, updates_shape, y, y_shape);
}
TEST_F(ArrayGradTest, ScatterNdNonAliasingAddGrad_SimpleIndexing) {
TensorShape updates_shape({4});
TensorShape input_shape({8});
auto input = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(input_shape));
auto updates =
Placeholder(scope_, DT_FLOAT, Placeholder::Shape(updates_shape));
auto indices = Const(scope_, {{4}, {3}, {1}, {7}});
auto y = ScatterNdNonAliasingAdd(scope_, input, indices, updates);
RunTest({input, updates}, {input_shape, updates_shape}, {y}, {input_shape});
}
TEST_F(ArrayGradTest, ScatterNdNonAliasingAddGrad_SliceIndexing) {
TensorShape updates_shape({2, 4, 4});
TensorShape input_shape({4, 4, 4});
auto input = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(input_shape));
auto updates =
Placeholder(scope_, DT_FLOAT, Placeholder::Shape(updates_shape));
auto indices = Const(scope_, {{0}, {2}});
auto y = ScatterNdNonAliasingAdd(scope_, input, indices, updates);
RunTest({input, updates}, {input_shape, updates_shape}, {y}, {input_shape});
}
TEST_F(ArrayGradTest, PadGrad) {
TensorShape x_shape({2, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto paddings = Const(scope_, {{1, 1}, {2, 2}});
TensorShape y_shape({4, 7});
auto y = Pad(scope_, x, paddings);
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, SpaceToBatchGrad) {
TensorShape x_shape({1, 2, 2, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto paddings = Const(scope_, {{1, 1}, {1, 1}});
TensorShape y_shape({4, 2, 2, 1});
auto y = SpaceToBatch(scope_, x, paddings, 2);
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, SpaceToBatchNdGrad) {
TensorShape x_shape({2, 2, 4, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto block_shape = Const(scope_, {2, 2});
auto paddings = Const(scope_, {{0, 0}, {2, 0}});
TensorShape y_shape({8, 1, 3, 1});
auto y = SpaceToBatchND(scope_, x, block_shape, paddings);
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, BatchToSpaceGrad) {
TensorShape x_shape({4, 2, 2, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto paddings = Const(scope_, {{1, 1}, {1, 1}});
TensorShape y_shape({1, 2, 2, 1});
auto y = BatchToSpace(scope_, x, paddings, 2);
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, BatchToSpaceNdGrad) {
TensorShape x_shape({8, 1, 3, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto block_shape = Const(scope_, {2, 2});
auto paddings = Const(scope_, {{0, 0}, {2, 0}});
TensorShape y_shape({2, 2, 4, 1});
auto y = BatchToSpaceND(scope_, x, block_shape, paddings);
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, SpaceToDepthGrad) {
TensorShape x_shape({1, 2, 2, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
TensorShape y_shape({1, 1, 1, 4});
auto y = SpaceToDepth(scope_, x, 2);
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, DepthToSpaceGrad) {
TensorShape x_shape({1, 1, 1, 4});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
TensorShape y_shape({1, 2, 2, 1});
auto y = DepthToSpace(scope_, x, 2);
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, MirrorPadGrad_Reflect) {
TensorShape x_shape({2, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto paddings = Const(scope_, {{1, 1}, {2, 2}});
TensorShape y_shape({4, 7});
auto y = MirrorPad(scope_, x, paddings, "REFLECT");
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, MirrorPadGrad_Symmetric) {
TensorShape x_shape({2, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto paddings = Const(scope_, {{1, 1}, {2, 2}});
TensorShape y_shape({4, 7});
auto y = MirrorPad(scope_, x, paddings, "SYMMETRIC");
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, MirrorPadGradGrad_Reflect) {
TensorShape x_shape({4, 7});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto paddings = Const(scope_, {{1, 1}, {2, 2}});
TensorShape y_shape({2, 3});
auto y = MirrorPadGrad(scope_, x, paddings, "REFLECT");
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, MirrorPadGradGrad_Symmetric) {
TensorShape x_shape({4, 7});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto paddings = Const(scope_, {{1, 1}, {2, 2}});
TensorShape y_shape({2, 3});
auto y = MirrorPadGrad(scope_, x, paddings, "SYMMETRIC");
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, StridedSliceGrad) {
TensorShape x_shape({6, 4, 4});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = StridedSlice(scope_, x, {2, 1, 1}, {6, 3, 3}, {2, 1, 1});
RunTest(x, x_shape, y, {2, 2, 2});
y = StridedSlice(scope_, x, {2, 1, 1}, {6, 3, 3}, {2, 1, 1},
StridedSlice::BeginMask(1 << 1).EndMask(1 << 2));
RunTest(x, x_shape, y, {2, 3, 3});
y = StridedSlice(scope_, x, {0, 2, 1, 1}, {0, 6, 3, 3}, {1, 2, 1, 1},
StridedSlice::NewAxisMask(1 << 0));
RunTest(x, x_shape, y, {1, 2, 2, 2});
}
TEST_F(ArrayGradTest, SliceGrad) {
TensorShape x_shape({3, 5, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Slice(scope_, x, {1, 2, 1}, {1, 3, 2});
RunTest(x, x_shape, y, {1, 3, 2});
}
TEST_F(ArrayGradTest, ConcatV2Grad) {
TensorShape shape({3, 2, 5});
std::vector<Output> xs;
xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)));
xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)));
xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)));
auto axis = Const(scope_, 0);
auto y = Concat(scope_, xs, axis);
TensorShape result_shape({9, 2, 5});
RunTest(xs, {shape, shape, shape}, {y}, {result_shape});
}
TEST_F(ArrayGradTest, BroadcastToGrad) {
TensorShape x_shape({2, 5});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
TensorShape y_shape({3, 2, 5});
auto y = BroadcastTo(scope_, x, Const(scope_, {3, 2, 5}));
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, TileGrad) {
TensorShape x_shape({2, 5});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Tile(scope_, x, Const(scope_, {3, 2}));
TensorShape y_shape({6, 10});
RunTest(x, x_shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherV2Grad_Simple) {
TensorShape shape({100});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = GatherV2(scope_, x, {2, 0, 2, 5}, 0);
TensorShape y_shape({4});
RunTest(x, shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherV2Grad_MoreParamDims) {
TensorShape shape({100, 2, 3, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = GatherV2(scope_, x, {2, 0, 2, 5}, 0);
TensorShape y_shape({4, 2, 3, 2});
RunTest(x, shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherV2Grad_MoreIndexDims) {
TensorShape shape({100});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = GatherV2(scope_, x, {{2, 0}, {2, 5}}, 0);
TensorShape y_shape({2, 2});
RunTest(x, shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherV2Grad_DifferentAxis) {
TensorShape shape({2, 10, 10, 2, 7});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = GatherV2(scope_, x, {2, 0, 2, 5, 5}, 1);
TensorShape y_shape({2, 5, 10, 2, 7});
RunTest(x, shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherV2Grad_DifferentAxis2) {
TensorShape shape({2, 3, 100, 2, 7});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = GatherV2(scope_, x, {2, 0, 2, 5, 5}, 2);
TensorShape y_shape({2, 3, 5, 2, 7});
RunTest(x, shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherV2Grad_LastAxis) {
TensorShape shape({2, 3, 10});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = GatherV2(scope_, x, {2, 0, 2, 5, 5}, 2);
TensorShape y_shape({2, 3, 5});
RunTest(x, shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherV2Grad_LastAxis2) {
TensorShape shape({2, 3, 7, 10});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = GatherV2(scope_, x, {9, 8, 7, 6}, 3);
TensorShape y_shape({2, 3, 7, 4});
RunTest(x, shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherV2Grad_BatchDim) {
TensorShape shape({2, 100, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
GatherV2::Attrs attrs;
attrs.batch_dims_ = 1;
auto y =
GatherV2(scope_, x, {{2, 0, 2, 5}, {1, 1, 7, 10}}, 1, attrs);
TensorShape y_shape({2, 4, 3});
RunTest(x, shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherV2Grad_BatchDim2) {
TensorShape shape({2, 19});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
GatherV2::Attrs attrs;
attrs.batch_dims_ = 1;
auto y = GatherV2(scope_, x, {{0}, {0}}, 1, attrs);
TensorShape y_shape({2, 1});
RunTest(x, shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherV2Grad_BatchDimWithAxis) {
TensorShape shape({2, 1, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
GatherV2::Attrs attrs;
attrs.batch_dims_ = 1;
auto y = GatherV2(scope_, x, {{0}, {0}}, 2, attrs);
TensorShape y_shape({2, 1, 1});
RunTest(x, shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherV2Grad_TwoBatchDims) {
TensorShape shape({2, 2, 100});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
GatherV2::Attrs attrs;
attrs.batch_dims_ = 2;
auto y = GatherV2(scope_, x, {{{2, 0}, {2, 5}}, {{1, 1}, {7, 10}}},
2, attrs);
TensorShape y_shape({2, 2, 2});
RunTest(x, shape, y, y_shape);
}
TEST_F(ArrayGradTest, GatherV2Grad_TwoBatchDimsWithAxis) {
TensorShape shape({2, 2, 3, 100});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
GatherV2::Attrs attrs;
attrs.batch_dims_ = 2;
auto y = GatherV2(scope_, x, {{{2, 0}, {2, 5}}, {{1, 1}, {7, 10}}},
3, attrs);
TensorShape y_shape({2, 2, 2, 3});
RunTest(x, shape, y, y_shape);
}
}
} |
1,264 | cpp | tensorflow/tensorflow | modular_filesystem | tensorflow/c/experimental/filesystem/modular_filesystem.cc | tensorflow/c/experimental/filesystem/modular_filesystem_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_MODULAR_FILESYSTEM_H_
#define TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_MODULAR_FILESYSTEM_H_
#include <memory>
#include "tensorflow/c/experimental/filesystem/filesystem_interface.h"
#include "tensorflow/core/platform/file_system.h"
namespace tensorflow {
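// FileSystem implementation that forwards every operation to a plugin's C
// function-pointer tables. This object owns the tables and the plugin's
// TF_Filesystem instance (cleaned up in the destructor), and memory handed
// back by the plugin is released through the plugin-supplied allocator
// callbacks.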
class ModularFileSystem final : public FileSystem {
public:
ModularFileSystem(
std::unique_ptr<TF_Filesystem> filesystem,
std::unique_ptr<const TF_FilesystemOps> filesystem_ops,
std::unique_ptr<const TF_RandomAccessFileOps> random_access_file_ops,
std::unique_ptr<const TF_WritableFileOps> writable_file_ops,
std::unique_ptr<const TF_ReadOnlyMemoryRegionOps>
read_only_memory_region_ops,
std::function<void*(size_t)> plugin_memory_allocate,
std::function<void(void*)> plugin_memory_free)
: filesystem_(std::move(filesystem)),
ops_(std::move(filesystem_ops)),
random_access_file_ops_(std::move(random_access_file_ops)),
writable_file_ops_(std::move(writable_file_ops)),
read_only_memory_region_ops_(std::move(read_only_memory_region_ops)),
plugin_memory_allocate_(std::move(plugin_memory_allocate)),
plugin_memory_free_(std::move(plugin_memory_free)) {}
~ModularFileSystem() override { ops_->cleanup(filesystem_.get()); }
TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT;
Status NewRandomAccessFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<RandomAccessFile>* result) override;
Status NewWritableFile(const std::string& fname, TransactionToken* token,
std::unique_ptr<WritableFile>* result) override;
Status NewAppendableFile(const std::string& fname, TransactionToken* token,
std::unique_ptr<WritableFile>* result) override;
Status NewReadOnlyMemoryRegionFromFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<ReadOnlyMemoryRegion>* result) override;
Status FileExists(const std::string& fname, TransactionToken* token) override;
bool FilesExist(const std::vector<std::string>& files,
TransactionToken* token,
std::vector<Status>* status) override;
Status GetChildren(const std::string& dir, TransactionToken* token,
std::vector<std::string>* result) override;
Status GetMatchingPaths(const std::string& pattern, TransactionToken* token,
std::vector<std::string>* results) override;
Status DeleteFile(const std::string& fname, TransactionToken* token) override;
Status DeleteRecursively(const std::string& dirname, TransactionToken* token,
int64_t* undeleted_files,
int64_t* undeleted_dirs) override;
Status DeleteDir(const std::string& dirname,
TransactionToken* token) override;
Status RecursivelyCreateDir(const std::string& dirname,
TransactionToken* token) override;
Status CreateDir(const std::string& dirname,
TransactionToken* token) override;
Status Stat(const std::string& fname, TransactionToken* token,
FileStatistics* stat) override;
Status IsDirectory(const std::string& fname,
TransactionToken* token) override;
Status GetFileSize(const std::string& fname, TransactionToken* token,
uint64* file_size) override;
Status RenameFile(const std::string& src, const std::string& target,
TransactionToken* token) override;
Status CopyFile(const std::string& src, const std::string& target,
TransactionToken* token) override;
std::string TranslateName(const std::string& name) const override;
void FlushCaches(TransactionToken* token) override;
Status SetOption(const std::string& name,
const std::vector<string>& values) override;
Status SetOption(const std::string& name,
const std::vector<int64_t>& values) override;
Status SetOption(const std::string& name,
const std::vector<double>& values) override;
private:
std::unique_ptr<TF_Filesystem> filesystem_;
std::unique_ptr<const TF_FilesystemOps> ops_;
std::unique_ptr<const TF_RandomAccessFileOps> random_access_file_ops_;
std::unique_ptr<const TF_WritableFileOps> writable_file_ops_;
std::unique_ptr<const TF_ReadOnlyMemoryRegionOps>
read_only_memory_region_ops_;
std::function<void*(size_t)> plugin_memory_allocate_;
std::function<void(void*)> plugin_memory_free_;
ModularFileSystem(const ModularFileSystem&) = delete;
void operator=(const ModularFileSystem&) = delete;
};
class ModularRandomAccessFile final : public RandomAccessFile {
public:
ModularRandomAccessFile(const std::string& filename,
std::unique_ptr<TF_RandomAccessFile> file,
const TF_RandomAccessFileOps* ops)
: filename_(filename), file_(std::move(file)), ops_(ops) {}
~ModularRandomAccessFile() override { ops_->cleanup(file_.get()); }
Status Read(uint64 offset, size_t n, StringPiece* result,
char* scratch) const override;
Status Name(StringPiece* result) const override;
private:
std::string filename_;
std::unique_ptr<TF_RandomAccessFile> file_;
const TF_RandomAccessFileOps* ops_;
ModularRandomAccessFile(const ModularRandomAccessFile&) = delete;
void operator=(const ModularRandomAccessFile&) = delete;
};
class ModularWritableFile final : public WritableFile {
public:
ModularWritableFile(const std::string& filename,
std::unique_ptr<TF_WritableFile> file,
const TF_WritableFileOps* ops)
: filename_(filename), file_(std::move(file)), ops_(ops) {}
~ModularWritableFile() override { ops_->cleanup(file_.get()); }
Status Append(StringPiece data) override;
Status Close() override;
Status Flush() override;
Status Sync() override;
Status Name(StringPiece* result) const override;
Status Tell(int64_t* position) override;
private:
std::string filename_;
std::unique_ptr<TF_WritableFile> file_;
const TF_WritableFileOps* ops_;
ModularWritableFile(const ModularWritableFile&) = delete;
void operator=(const ModularWritableFile&) = delete;
};
class ModularReadOnlyMemoryRegion final : public ReadOnlyMemoryRegion {
public:
ModularReadOnlyMemoryRegion(std::unique_ptr<TF_ReadOnlyMemoryRegion> region,
const TF_ReadOnlyMemoryRegionOps* ops)
: region_(std::move(region)), ops_(ops) {}
~ModularReadOnlyMemoryRegion() override { ops_->cleanup(region_.get()); };
const void* data() override { return ops_->data(region_.get()); }
uint64 length() override { return ops_->length(region_.get()); }
private:
std::unique_ptr<TF_ReadOnlyMemoryRegion> region_;
const TF_ReadOnlyMemoryRegionOps* ops_;
ModularReadOnlyMemoryRegion(const ModularReadOnlyMemoryRegion&) = delete;
void operator=(const ModularReadOnlyMemoryRegion&) = delete;
};
Status RegisterFilesystemPlugin(const std::string& dso_path);
}
#endif
#include "tensorflow/c/experimental/filesystem/modular_filesystem.h"
#include <algorithm>
#include <string>
#include <utility>
#include "tensorflow/c/experimental/filesystem/modular_filesystem_registration.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
using UniquePtrTo_TF_Status =
::std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;
Status ModularFileSystem::NewRandomAccessFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<RandomAccessFile>* result) {
if (ops_->new_random_access_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support NewRandomAccessFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
auto file = std::make_unique<TF_RandomAccessFile>();
std::string translated_name = TranslateName(fname);
ops_->new_random_access_file(filesystem_.get(), translated_name.c_str(),
file.get(), plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK)
*result = std::make_unique<ModularRandomAccessFile>(
translated_name, std::move(file), random_access_file_ops_.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::NewWritableFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<WritableFile>* result) {
if (ops_->new_writable_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support NewWritableFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
auto file = std::make_unique<TF_WritableFile>();
std::string translated_name = TranslateName(fname);
ops_->new_writable_file(filesystem_.get(), translated_name.c_str(),
file.get(), plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK)
*result = std::make_unique<ModularWritableFile>(
translated_name, std::move(file), writable_file_ops_.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::NewAppendableFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<WritableFile>* result) {
if (ops_->new_appendable_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support NewAppendableFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
auto file = std::make_unique<TF_WritableFile>();
std::string translated_name = TranslateName(fname);
ops_->new_appendable_file(filesystem_.get(), translated_name.c_str(),
file.get(), plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK)
*result = std::make_unique<ModularWritableFile>(
translated_name, std::move(file), writable_file_ops_.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::NewReadOnlyMemoryRegionFromFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<ReadOnlyMemoryRegion>* result) {
if (ops_->new_read_only_memory_region_from_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname,
" does not support NewReadOnlyMemoryRegionFromFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
auto region = std::make_unique<TF_ReadOnlyMemoryRegion>();
std::string translated_name = TranslateName(fname);
ops_->new_read_only_memory_region_from_file(
filesystem_.get(), translated_name.c_str(), region.get(),
plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK)
*result = std::make_unique<ModularReadOnlyMemoryRegion>(
std::move(region), read_only_memory_region_ops_.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::FileExists(const std::string& fname,
TransactionToken* token) {
if (ops_->path_exists == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support FileExists()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
const std::string translated_name = TranslateName(fname);
ops_->path_exists(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
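// The C API takes a char** of paths, so each translated name is strdup'ed
// into a mutable buffer and freed after the call. Per-path statuses are only
// collected when the caller asked for them.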
bool ModularFileSystem::FilesExist(const std::vector<std::string>& files,
TransactionToken* token,
std::vector<Status>* status) {
if (ops_->paths_exist == nullptr)
return FileSystem::FilesExist(files, token, status);
std::vector<char*> translated_names;
translated_names.reserve(files.size());
for (int i = 0; i < files.size(); i++)
translated_names.push_back(strdup(TranslateName(files[i]).c_str()));
bool result;
if (status == nullptr) {
result = ops_->paths_exist(filesystem_.get(), translated_names.data(),
files.size(), nullptr);
} else {
std::vector<TF_Status*> plugin_status;
plugin_status.reserve(files.size());
for (int i = 0; i < files.size(); i++)
plugin_status.push_back(TF_NewStatus());
result = ops_->paths_exist(filesystem_.get(), translated_names.data(),
files.size(), plugin_status.data());
for (int i = 0; i < files.size(); i++) {
status->push_back(StatusFromTF_Status(plugin_status[i]));
TF_DeleteStatus(plugin_status[i]);
}
}
for (int i = 0; i < files.size(); i++) free(translated_names[i]);
return result;
}
Status ModularFileSystem::GetChildren(const std::string& dir,
TransactionToken* token,
std::vector<std::string>* result) {
if (ops_->get_children == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", dir, " does not support GetChildren()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dir);
char** children = nullptr;
const int num_children =
ops_->get_children(filesystem_.get(), translated_name.c_str(), &children,
plugin_status.get());
if (num_children >= 0) {
for (int i = 0; i < num_children; i++) {
result->push_back(std::string(children[i]));
plugin_memory_free_(children[i]);
}
plugin_memory_free_(children);
}
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::GetMatchingPaths(const std::string& pattern,
TransactionToken* token,
std::vector<std::string>* result) {
if (ops_->get_matching_paths == nullptr)
return internal::GetMatchingPaths(this, Env::Default(), pattern, result);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
char** matches = nullptr;
const int num_matches = ops_->get_matching_paths(
filesystem_.get(), pattern.c_str(), &matches, plugin_status.get());
if (num_matches >= 0) {
for (int i = 0; i < num_matches; i++) {
result->push_back(std::string(matches[i]));
plugin_memory_free_(matches[i]);
}
plugin_memory_free_(matches);
}
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::DeleteFile(const std::string& fname,
TransactionToken* token) {
if (ops_->delete_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support DeleteFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(fname);
ops_->delete_file(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::DeleteRecursively(const std::string& dirname,
TransactionToken* token,
int64_t* undeleted_files,
int64_t* undeleted_dirs) {
if (undeleted_files == nullptr || undeleted_dirs == nullptr)
return errors::FailedPrecondition(
"DeleteRecursively must not be called with `undeleted_files` or "
"`undeleted_dirs` set to NULL");
if (ops_->delete_recursively == nullptr)
return FileSystem::DeleteRecursively(dirname, token, undeleted_files,
undeleted_dirs);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dirname);
uint64_t plugin_undeleted_files, plugin_undeleted_dirs;
ops_->delete_recursively(filesystem_.get(), translated_name.c_str(),
&plugin_undeleted_files, &plugin_undeleted_dirs,
plugin_status.get());
*undeleted_files = plugin_undeleted_files;
*undeleted_dirs = plugin_undeleted_dirs;
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::DeleteDir(const std::string& dirname,
TransactionToken* token) {
if (ops_->delete_dir == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", dirname, " does not support DeleteDir()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dirname);
ops_->delete_dir(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::RecursivelyCreateDir(const std::string& dirname,
TransactionToken* token) {
if (ops_->recursively_create_dir == nullptr)
return FileSystem::RecursivelyCreateDir(dirname, token);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dirname);
ops_->recursively_create_dir(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::CreateDir(const std::string& dirname,
TransactionToken* token) {
if (ops_->create_dir == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", dirname, " does not support CreateDir()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dirname);
ops_->create_dir(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::Stat(const std::string& fname,
TransactionToken* token, FileStatistics* stat) {
if (ops_->stat == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support Stat()"));
if (stat == nullptr)
return errors::InvalidArgument("FileStatistics pointer must not be NULL");
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(fname);
TF_FileStatistics stats;
ops_->stat(filesystem_.get(), translated_name.c_str(), &stats,
plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK) {
stat->length = stats.length;
stat->mtime_nsec = stats.mtime_nsec;
stat->is_directory = stats.is_directory;
}
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::IsDirectory(const std::string& name,
TransactionToken* token) {
if (ops_->is_directory == nullptr)
return FileSystem::IsDirectory(name, token);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(name);
ops_->is_directory(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::GetFileSize(const std::string& fname,
TransactionToken* token,
uint64* file_size) {
if (ops_->get_file_size == nullptr) {
FileStatistics stat;
Status status = Stat(fname, &stat);
if (!status.ok()) return status;
if (stat.is_directory)
return errors::FailedPrecondition("Called GetFileSize on a directory");
*file_size = stat.length;
return status;
}
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(fname);
*file_size = ops_->get_file_size(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::RenameFile(const std::string& src,
const std::string& target,
TransactionToken* token) {
if (ops_->rename_file == nullptr) {
Status status = CopyFile(src, target);
if (status.ok()) status = DeleteFile(src);
return status;
}
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_src = TranslateName(src);
std::string translated_target = TranslateName(target);
ops_->rename_file(filesystem_.get(), translated_src.c_str(),
translated_target.c_str(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::CopyFile(const std::string& src,
const std::string& target,
TransactionToken* token) {
if (ops_->copy_file == nullptr)
return FileSystem::CopyFile(src, target, token);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_src = TranslateName(src);
std::string translated_target = TranslateName(target);
ops_->copy_file(filesystem_.get(), translated_src.c_str(),
translated_target.c_str(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
std::string ModularFileSystem::TranslateName(const std::string& name) const {
if (ops_->translate_name == nullptr) return FileSystem::TranslateName(name);
char* p = ops_->translate_name(filesystem_.get(), name.c_str());
CHECK(p != nullptr) << "TranslateName(" << name << ") returned nullptr";
std::string ret(p);
plugin_memory_free_(p);
return ret;
}
void ModularFileSystem::FlushCaches(TransactionToken* token) {
if (ops_->flush_caches != nullptr) ops_->flush_caches(filesystem_.get());
}
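// The three SetOption overloads below marshal the values into a
// TF_Filesystem_Option with the matching type tag (Buffer, Int, or Real)
// before handing a single option to the plugin's
// set_filesystem_configuration hook.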
Status ModularFileSystem::SetOption(const std::string& name,
const std::vector<string>& values) {
if (ops_->set_filesystem_configuration == nullptr) {
return errors::Unimplemented(
"Filesystem does not support SetConfiguration()");
}
if (values.empty()) {
return errors::InvalidArgument(
"SetConfiguration() needs number of values > 0");
}
TF_Filesystem_Option option;
memset(&option, 0, sizeof(option));
option.name = const_cast<char*>(name.c_str());
TF_Filesystem_Option_Value option_value;
memset(&option_value, 0, sizeof(option_value));
option_value.type_tag = TF_Filesystem_Option_Type_Buffer;
option_value.num_values = values.size();
std::vector<TF_Filesystem_Option_Value_Union> option_values(values.size());
for (size_t i = 0; i < values.size(); i++) {
memset(&option_values[i], 0, sizeof(option_values[i]));
option_values[i].buffer_val.buf = const_cast<char*>(values[i].c_str());
option_values[i].buffer_val.buf_length = values[i].size();
}
option_value.values = &option_values[0];
option.value = &option_value;
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->set_filesystem_configuration(filesystem_.get(), &option, 1,
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::SetOption(const std::string& name,
const std::vector<int64_t>& values) {
if (ops_->set_filesystem_configuration == nullptr) {
return errors::Unimplemented(
"Filesystem does not support SetConfiguration()");
}
if (values.empty()) {
return errors::InvalidArgument(
"SetConfiguration() needs number of values > 0");
}
TF_Filesystem_Option option;
memset(&option, 0, sizeof(option));
option.name = const_cast<char*>(name.c_str());
TF_Filesystem_Option_Value option_value;
memset(&option_value, 0, sizeof(option_value));
option_value.type_tag = TF_Filesystem_Option_Type_Int;
option_value.num_values = values.size();
std::vector<TF_Filesystem_Option_Value_Union> option_values(values.size());
for (size_t i = 0; i < values.size(); i++) {
memset(&option_values[i], 0, sizeof(option_values[i]));
option_values[i].int_val = values[i];
}
option_value.values = &option_values[0];
option.value = &option_value;
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->set_filesystem_configuration(filesystem_.get(), &option, 1,
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::SetOption(const std::string& name,
const std::vector<double>& values) {
if (ops_->set_filesystem_configuration == nullptr) {
return errors::Unimplemented(
"Filesystem does not support SetConfiguration()");
}
if (values.empty()) {
return errors::InvalidArgument(
"SetConfiguration() needs number of values > 0");
}
TF_Filesystem_Option option;
memset(&option, 0, sizeof(option));
option.name = const_cast<char*>(name.c_str());
TF_Filesystem_Option_Value option_value;
memset(&option_value, 0, sizeof(option_value));
option_value.type_tag = TF_Filesystem_Option_Type_Real;
option_value.num_values = values.size();
std::vector<TF_Filesystem_Option_Value_Union> option_values(values.size());
for (size_t i = 0; i < values.size(); i++) {
memset(&option_values[i], 0, sizeof(option_values[i]));
option_values[i].real_val = values[i];
}
option_value.values = &option_values[0];
option.value = &option_value;
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->set_filesystem_configuration(filesystem_.get(), &option, 1,
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularRandomAccessFile::Read(uint64 offset, size_t n,
StringPiece* result, char* scratch) const {
if (ops_->read == nullptr)
return errors::Unimplemented(
tensorflow::strings::StrCat("Read() not implemented for ", filename_));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
int64_t read =
ops_->read(file_.get(), offset, n, scratch, plugin_status.get());
if (read > 0) *result = StringPiece(scratch, read);
return StatusFromTF_Status(plugin_status.get());
}
Status ModularRandomAccessFile::Name(StringPiece* result) const {
*result = filename_;
return OkStatus();
}
Status ModularWritableFile::Append(StringPiece data) {
if (ops_->append == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Append() not implemented for ", filename_));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->append(file_.get(), data.data(), data.size(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularWritableFile::Close() {
if (ops_->close == nullptr)
return errors::Unimplemented(
tensorflow::strings::StrCat("Close() not implemented for ", filename_));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->close(file_.get(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularWritableFile::Flush() {
if (ops_->flush == nullptr) return OkStatus();
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->flush(file_.get(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularWritableFile::Sync() {
if (ops_->sync == nullptr) return Flush();
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->sync(file_.get(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularWritableFile::Name(StringPiece* result) const {
*result = filename_;
return OkStatus();
}
Status ModularWritableFile::Tell(int64_t* position) {
if (ops_->tell == nullptr)
return errors::Unimplemented(
tensorflow::strings::StrCat("Tell() not implemented for ", filename_));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
*position = ops_->tell(file_.get(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
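// Loads the plugin's shared object, resolves its `TF_InitPlugin` entry point,
// lets the plugin populate a TF_FilesystemPluginInfo, and registers the
// described filesystems with TensorFlow.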
Status RegisterFilesystemPlugin(const std::string& dso_path) {
Env* env = Env::Default();
void* dso_handle;
TF_RETURN_IF_ERROR(env->LoadDynamicLibrary(dso_path.c_str(), &dso_handle));
void* dso_symbol;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
env->GetSymbolFromLibrary(dso_handle, "TF_InitPlugin", &dso_symbol),
"Failed to load TF_InitPlugin symbol for DSO: ", dso_path);
TF_FilesystemPluginInfo info;
memset(&info, 0, sizeof(info));
auto TF_InitPlugin =
reinterpret_cast<int (*)(TF_FilesystemPluginInfo*)>(dso_symbol);
TF_InitPlugin(&info);
return filesystem_registration::RegisterFilesystemPluginImpl(&info);
}
} | #include "tensorflow/c/experimental/filesystem/modular_filesystem.h"
#include <memory>
#include <random>
#include <string>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/stacktrace_handler.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/command_line_flags.h"
#if defined(PLATFORM_WINDOWS)
#include <direct.h>
#define mkdir(name, mode) _mkdir(name)
#undef CopyFile
#undef DeleteFile
#undef TranslateName
#endif
namespace tensorflow {
namespace {
using ::tensorflow::error::Code;
class ModularFileSystemTest : public ::testing::TestWithParam<std::string> {
public:
ModularFileSystemTest() {
const std::string test_name = tensorflow::str_util::StringReplace(
::testing::UnitTest::GetInstance()->current_test_info()->name(), "/",
"_", true);
if (!cloud_path_.empty()) {
root_dir_ = tensorflow::strings::StrCat(
"/", tmp_dir_,
tensorflow::strings::StrCat("tf_fs_", rng_val_, "_", test_name), "/");
} else {
root_dir_ = tensorflow::io::JoinPath(
tmp_dir_,
tensorflow::strings::StrCat("tf_fs_", rng_val_, "_", test_name));
}
if (!GetParam().empty()) {
root_dir_ = tensorflow::strings::StrCat(GetParam(), ":
root_dir_);
}
env_ = Env::Default();
}
void SetUp() override {
FileSystem* fs = nullptr;
Status s = env_->GetFileSystemForFile(root_dir_, &fs);
if (fs == nullptr || !s.ok())
GTEST_SKIP() << "No filesystem registered: " << s;
s = fs->CreateDir(root_dir_);
if (!s.ok()) {
GTEST_SKIP() << "Cannot create working directory: " << s;
}
}
std::string GetURIForPath(StringPiece path) {
const std::string translated_name =
tensorflow::io::JoinPath(root_dir_, path);
return translated_name;
}
StringPiece GetRelativePath(StringPiece absolute_path) {
return tensorflow::str_util::StripPrefix(absolute_path, root_dir_);
}
static void InitializeTestRNG() {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> distribution;
rng_val_ = distribution(gen);
}
static void SetCloudPath(const std::string& cloud_path) {
cloud_path_ = cloud_path;
if (cloud_path_.back() == '/') cloud_path_.pop_back();
}
static void SetTmpDir(const std::string& tmp_dir) {
tmp_dir_ = tmp_dir.empty() ? ::testing::TempDir() : tmp_dir;
}
protected:
Env* env_;
private:
std::string root_dir_;
static int rng_val_;
static std::string cloud_path_;
static std::string tmp_dir_;
};
int ModularFileSystemTest::rng_val_;
std::string ModularFileSystemTest::cloud_path_;
std::string ModularFileSystemTest::tmp_dir_;
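// Predicate for EXPECT_PRED2: plugins may legitimately leave any operation
// unimplemented, so UNIMPLEMENTED is accepted alongside the expected code.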
bool UnimplementedOrReturnsCode(Status actual_status, Code expected_code) {
Code actual_code = actual_status.code();
return (actual_code == Code::UNIMPLEMENTED) || (actual_code == expected_code);
}
TEST_P(ModularFileSystemTest, TestTranslateName) {
const std::string generic_path = GetURIForPath("some_path");
FileSystem* fs = nullptr;
Status s = env_->GetFileSystemForFile(generic_path, &fs);
if (fs == nullptr || !s.ok())
GTEST_SKIP() << "No filesystem registered: " << s;
if (GetParam().empty()) {
EXPECT_EQ(fs->TranslateName(""), "");
EXPECT_EQ(fs->TranslateName("/"), "/");
EXPECT_EQ(fs->TranslateName("
EXPECT_EQ(fs->TranslateName("a_file"), "a_file");
EXPECT_EQ(fs->TranslateName("a_dir/.."), ".");
} else {
EXPECT_EQ(fs->TranslateName(tensorflow::strings::StrCat(GetParam(), ":
"/");
EXPECT_EQ(
fs->TranslateName(tensorflow::strings::StrCat(GetParam(), ":
"/");
EXPECT_EQ(
fs->TranslateName(tensorflow::strings::StrCat(GetParam(), ":
"/");
}
EXPECT_EQ(GetRelativePath(fs->TranslateName(GetURIForPath("a_file"))),
"/a_file");
EXPECT_EQ(GetRelativePath(fs->TranslateName(GetURIForPath("a_dir/a_file"))),
"/a_dir/a_file");
EXPECT_EQ(GetRelativePath(fs->TranslateName(GetURIForPath("./a_file"))),
"/a_file");
EXPECT_EQ(GetRelativePath(fs->TranslateName(
GetURIForPath("a/convoluted/../path/./to/.
"/a/path/to/a/file");
}
TEST_P(ModularFileSystemTest, TestCreateFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestCreateFileNonExisting) {
const std::string filepath = GetURIForPath("dir_not_found/a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCreateFileExistingDir) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(new_path, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestAppendFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewAppendableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestAppendFileNonExisting) {
const std::string filepath = GetURIForPath("dir_not_found/a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewAppendableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestAppendFileExistingDir) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<WritableFile> new_file;
status = env_->NewAppendableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateThenAppendFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::unique_ptr<WritableFile> same_file;
status = env_->NewAppendableFile(filepath, &same_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestAppendFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_file");
std::unique_ptr<WritableFile> same_file;
status = env_->NewAppendableFile(new_path, &same_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestReadFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<RandomAccessFile> new_file;
Status status = env_->NewRandomAccessFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestReadFileNonExisting) {
const std::string filepath = GetURIForPath("dir_not_found/a_file");
std::unique_ptr<RandomAccessFile> new_file;
Status status = env_->NewRandomAccessFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestReadFileExistingDir) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<RandomAccessFile> new_file;
status = env_->NewRandomAccessFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateThenReadFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::unique_ptr<RandomAccessFile> same_file;
status = env_->NewRandomAccessFile(filepath, &same_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestReadFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_file");
std::unique_ptr<RandomAccessFile> same_file;
status = env_->NewRandomAccessFile(new_path, &same_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegion) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<ReadOnlyMemoryRegion> region;
  Status status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionNonExisting) {
const std::string filepath = GetURIForPath("dir_not_found/a_file");
std::unique_ptr<ReadOnlyMemoryRegion> region;
  Status status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionExistingDir) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<ReadOnlyMemoryRegion> new_file;
status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromEmptyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::unique_ptr<ReadOnlyMemoryRegion> region;
  status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::INVALID_ARGUMENT);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = new_file->Append(test_data);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = new_file->Flush();
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = new_file->Close();
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
std::unique_ptr<ReadOnlyMemoryRegion> region;
  status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok())
GTEST_SKIP() << "NewReadOnlyMemoryRegionFromFile() not supported: "
<< status;
EXPECT_EQ(region->length(), test_data.size());
EXPECT_STREQ(reinterpret_cast<const char*>(region->data()),
test_data.c_str());
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::string new_path = GetURIForPath("a_file/a_file");
std::unique_ptr<ReadOnlyMemoryRegion> region;
  status = env_->NewReadOnlyMemoryRegionFromFile(new_path, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateDir) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestCreateDirNoParent) {
const std::string dirpath = GetURIForPath("dir_not_found/a_dir");
Status status = env_->CreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCreateDirWhichIsFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->CreateDir(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::ALREADY_EXISTS);
}
TEST_P(ModularFileSystemTest, TestCreateDirTwice) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->CreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::ALREADY_EXISTS);
}
TEST_P(ModularFileSystemTest, TestCreateDirPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_dir");
status = env_->CreateDir(new_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDir) {
const std::string dirpath = GetURIForPath("a/path/to/a/dir");
Status status = env_->RecursivelyCreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirInATree) {
const std::string dirpath = GetURIForPath("a/path/to/a/dir");
Status status = env_->RecursivelyCreateDir(dirpath);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string new_dirpath = GetURIForPath("a/path/to/a/another/dir");
status = env_->RecursivelyCreateDir(new_dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirWhichIsFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->RecursivelyCreateDir(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirTwice) {
const std::string dirpath = GetURIForPath("a/path/to/a/dir");
Status status = env_->RecursivelyCreateDir(dirpath);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
status = env_->RecursivelyCreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_dir");
status = env_->RecursivelyCreateDir(new_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirFromNestedDir) {
const std::string parent_path = GetURIForPath("some/path");
Status status = env_->RecursivelyCreateDir(parent_path);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string new_dirpath = GetURIForPath("some/path/that/is/extended");
status = env_->RecursivelyCreateDir(new_dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirFromNestedFile) {
const std::string parent_path = GetURIForPath("some/path");
Status status = env_->RecursivelyCreateDir(parent_path);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("some/path/to_a_file");
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_dirpath = GetURIForPath("some/path/to_a_file/error");
status = env_->RecursivelyCreateDir(new_dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->DeleteFile(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteFileFromDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("a_dir/a_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->DeleteFile(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteFileDoesNotExist) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->DeleteFile(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestDeleteFileWhichIsDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->DeleteFile(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_new_file");
status = env_->DeleteFile(new_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->DeleteDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryFromDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string target_path = GetURIForPath("a_dir/another_dir");
EXPECT_EQ(env_->CreateDir(target_path).code(), Code::OK);
status = env_->DeleteDir(target_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryDoesNotExist) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->DeleteDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryNotEmpty) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("a_dir/a_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->DeleteDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryWhichIsFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->DeleteDir(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_dir");
status = env_->DeleteDir(new_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyEmpty) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(dirpath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyNotEmpty) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string some_path = GetURIForPath("a_dir/another_dir");
status = env_->CreateDir(some_path);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string another_path = GetURIForPath("a_dir/yet_another_dir");
status = env_->CreateDir(another_path);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("a_dir/a_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(dirpath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyDoesNotExist) {
const std::string dirpath = GetURIForPath("a_dir");
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
Status status =
env_->DeleteRecursively(dirpath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 1);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyAFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(filepath, &undeleted_files, &undeleted_dirs);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_dir");
int64_t undeleted_files, undeleted_dirs;
status = env_->DeleteRecursively(new_path, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyANestedDir) {
const std::string parent_path = GetURIForPath("parent/path");
Status status = env_->RecursivelyCreateDir(parent_path);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string new_dirpath = GetURIForPath("parent/path/that/is/extended");
status = env_->RecursivelyCreateDir(new_dirpath);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string path = GetURIForPath("parent/path/that");
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(path, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
status = env_->FileExists(parent_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyANestedFile) {
const std::string parent_path = GetURIForPath("some/path");
Status status = env_->RecursivelyCreateDir(parent_path);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("some/path/to_a_file");
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(filepath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
status = env_->FileExists(parent_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRenameFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->RenameFile(filepath, new_filepath);
  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
} |
1,265 | cpp | tensorflow/tensorflow | ram_file_block_cache | third_party/xla/third_party/tsl/tsl/platform/cloud/ram_file_block_cache.cc | third_party/xla/third_party/tsl/tsl/platform/cloud/ram_file_block_cache_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_RAM_FILE_BLOCK_CACHE_H_
#define TENSORFLOW_TSL_PLATFORM_CLOUD_RAM_FILE_BLOCK_CACHE_H_
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "tsl/platform/cloud/file_block_cache.h"
#include "tsl/platform/env.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/notification.h"
#include "tsl/platform/status.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/thread_annotations.h"
#include "tsl/platform/types.h"
namespace tsl {
class RamFileBlockCache : public FileBlockCache {
public:
typedef std::function<Status(const string& filename, size_t offset,
size_t buffer_size, char* buffer,
size_t* bytes_transferred)>
BlockFetcher;
RamFileBlockCache(size_t block_size, size_t max_bytes, uint64 max_staleness,
BlockFetcher block_fetcher, Env* env = Env::Default())
: block_size_(block_size),
max_bytes_(max_bytes),
max_staleness_(max_staleness),
block_fetcher_(block_fetcher),
env_(env) {
if (max_staleness_ > 0) {
pruning_thread_.reset(env_->StartThread(ThreadOptions(), "TF_prune_FBC",
[this] { Prune(); }));
}
VLOG(1) << "GCS file block cache is "
<< (IsCacheEnabled() ? "enabled" : "disabled");
}
~RamFileBlockCache() override {
if (pruning_thread_) {
stop_pruning_thread_.Notify();
pruning_thread_.reset();
}
}
Status Read(const string& filename, size_t offset, size_t n, char* buffer,
size_t* bytes_transferred) override;
bool ValidateAndUpdateFileSignature(const string& filename,
int64_t file_signature) override
TF_LOCKS_EXCLUDED(mu_);
void RemoveFile(const string& filename) override TF_LOCKS_EXCLUDED(mu_);
void Flush() override TF_LOCKS_EXCLUDED(mu_);
size_t block_size() const override { return block_size_; }
size_t max_bytes() const override { return max_bytes_; }
uint64 max_staleness() const override { return max_staleness_; }
size_t CacheSize() const override TF_LOCKS_EXCLUDED(mu_);
bool IsCacheEnabled() const override {
return block_size_ > 0 && max_bytes_ > 0;
}
private:
const size_t block_size_;
const size_t max_bytes_;
const uint64 max_staleness_;
const BlockFetcher block_fetcher_;
Env* const env_;
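  // A block is identified by the file it belongs to and its byte offset
  // within that file (always a multiple of block_size_).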
typedef std::pair<string, size_t> Key;
enum class FetchState {
CREATED,
FETCHING,
FINISHED,
ERROR,
};
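  // A single cached block. Each block carries its own mutex and condition
  // variable so that concurrent readers of the same block coalesce onto one
  // fetch, tracked by the FetchState above.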
struct Block {
std::vector<char> data;
std::list<Key>::iterator lru_iterator;
std::list<Key>::iterator lra_iterator;
uint64 timestamp;
mutex mu;
FetchState state TF_GUARDED_BY(mu) = FetchState::CREATED;
condition_variable cond_var;
};
typedef std::map<Key, std::shared_ptr<Block>> BlockMap;
void Prune() TF_LOCKS_EXCLUDED(mu_);
bool BlockNotStale(const std::shared_ptr<Block>& block)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
std::shared_ptr<Block> Lookup(const Key& key) TF_LOCKS_EXCLUDED(mu_);
Status MaybeFetch(const Key& key, const std::shared_ptr<Block>& block)
TF_LOCKS_EXCLUDED(mu_);
void Trim() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status UpdateLRU(const Key& key, const std::shared_ptr<Block>& block)
TF_LOCKS_EXCLUDED(mu_);
void RemoveFile_Locked(const string& filename)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void RemoveBlock(BlockMap::iterator entry) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
std::unique_ptr<Thread> pruning_thread_;
Notification stop_pruning_thread_;
mutable mutex mu_;
BlockMap block_map_ TF_GUARDED_BY(mu_);
std::list<Key> lru_list_ TF_GUARDED_BY(mu_);
std::list<Key> lra_list_ TF_GUARDED_BY(mu_);
size_t cache_size_ TF_GUARDED_BY(mu_) = 0;
std::map<string, int64_t> file_signature_map_ TF_GUARDED_BY(mu_);
};
}
#endif
#include "tsl/platform/cloud/ram_file_block_cache.h"
#include <cstring>
#include <memory>
#include "absl/cleanup/cleanup.h"
#include "tsl/platform/env.h"
namespace tsl {
bool RamFileBlockCache::BlockNotStale(const std::shared_ptr<Block>& block) {
mutex_lock l(block->mu);
if (block->state != FetchState::FINISHED) {
return true;
}
if (max_staleness_ == 0) return true;
return env_->NowSeconds() - block->timestamp <= max_staleness_;
}
std::shared_ptr<RamFileBlockCache::Block> RamFileBlockCache::Lookup(
const Key& key) {
mutex_lock lock(mu_);
auto entry = block_map_.find(key);
if (entry != block_map_.end()) {
if (BlockNotStale(entry->second)) {
if (cache_stats_ != nullptr) {
cache_stats_->RecordCacheHitBlockSize(entry->second->data.size());
}
return entry->second;
} else {
RemoveFile_Locked(key.first);
}
}
auto new_entry = std::make_shared<Block>();
lru_list_.push_front(key);
lra_list_.push_front(key);
new_entry->lru_iterator = lru_list_.begin();
new_entry->lra_iterator = lra_list_.begin();
new_entry->timestamp = env_->NowSeconds();
block_map_.emplace(std::make_pair(key, new_entry));
return new_entry;
}
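// Evicts least-recently-used blocks until the cache fits in max_bytes_.
// Requires that the caller holds mu_.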
void RamFileBlockCache::Trim() {
while (!lru_list_.empty() && cache_size_ > max_bytes_) {
RemoveBlock(block_map_.find(lru_list_.back()));
}
}
Status RamFileBlockCache::UpdateLRU(const Key& key,
const std::shared_ptr<Block>& block) {
mutex_lock lock(mu_);
if (block->timestamp == 0) {
return OkStatus();
}
if (block->lru_iterator != lru_list_.begin()) {
lru_list_.erase(block->lru_iterator);
lru_list_.push_front(key);
block->lru_iterator = lru_list_.begin();
}
if (block->data.size() < block_size_) {
Key fmax = std::make_pair(key.first, std::numeric_limits<size_t>::max());
auto fcmp = block_map_.upper_bound(fmax);
if (fcmp != block_map_.begin() && key < (--fcmp)->first) {
return errors::Internal("Block cache contents are inconsistent.");
}
}
Trim();
return OkStatus();
}
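// Ensures `block` holds fetched data. The first caller moves the block from
// CREATED (or ERROR) to FETCHING and downloads with block->mu released;
// concurrent callers wait on the condition variable until the state is
// FINISHED. The cleanup handler accounts the new block against cache_size_.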
Status RamFileBlockCache::MaybeFetch(const Key& key,
const std::shared_ptr<Block>& block) {
bool downloaded_block = false;
auto reconcile_state =
absl::MakeCleanup([this, &downloaded_block, &key, &block] {
if (downloaded_block) {
mutex_lock l(mu_);
if (block->timestamp != 0) {
cache_size_ += block->data.capacity();
lra_list_.erase(block->lra_iterator);
lra_list_.push_front(key);
block->lra_iterator = lra_list_.begin();
block->timestamp = env_->NowSeconds();
}
}
});
mutex_lock l(block->mu);
Status status = OkStatus();
while (true) {
switch (block->state) {
case FetchState::ERROR:
TF_FALLTHROUGH_INTENDED;
case FetchState::CREATED:
block->state = FetchState::FETCHING;
block->mu.unlock();
block->data.clear();
block->data.resize(block_size_, 0);
size_t bytes_transferred;
status.Update(block_fetcher_(key.first, key.second, block_size_,
block->data.data(), &bytes_transferred));
if (cache_stats_ != nullptr) {
cache_stats_->RecordCacheMissBlockSize(bytes_transferred);
}
block->mu.lock();
if (status.ok()) {
block->data.resize(bytes_transferred, 0);
std::vector<char>(block->data).swap(block->data);
downloaded_block = true;
block->state = FetchState::FINISHED;
} else {
block->state = FetchState::ERROR;
}
block->cond_var.notify_all();
return status;
case FetchState::FETCHING:
block->cond_var.wait_for(l, std::chrono::seconds(60));
if (block->state == FetchState::FINISHED) {
return OkStatus();
}
break;
case FetchState::FINISHED:
return OkStatus();
}
}
return errors::Internal(
"Control flow should never reach the end of RamFileBlockCache::Fetch.");
}
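// Serves the read from the cache by walking the block-aligned range
// [start, finish), fetching blocks on demand and copying the overlapping
// bytes into `buffer`. A block shorter than block_size_ marks EOF and ends
// the walk.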
Status RamFileBlockCache::Read(const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
*bytes_transferred = 0;
if (n == 0) {
return OkStatus();
}
if (!IsCacheEnabled() || (n > max_bytes_)) {
return block_fetcher_(filename, offset, n, buffer, bytes_transferred);
}
size_t start = block_size_ * (offset / block_size_);
size_t finish = block_size_ * ((offset + n) / block_size_);
if (finish < offset + n) {
finish += block_size_;
}
size_t total_bytes_transferred = 0;
for (size_t pos = start; pos < finish; pos += block_size_) {
Key key = std::make_pair(filename, pos);
std::shared_ptr<Block> block = Lookup(key);
DCHECK(block) << "No block for key " << key.first << "@" << key.second;
TF_RETURN_IF_ERROR(MaybeFetch(key, block));
TF_RETURN_IF_ERROR(UpdateLRU(key, block));
const auto& data = block->data;
if (offset >= pos + data.size()) {
*bytes_transferred = total_bytes_transferred;
return errors::OutOfRange("EOF at offset ", offset, " in file ", filename,
" at position ", pos, "with data size ",
data.size());
}
auto begin = data.begin();
if (offset > pos) {
begin += offset - pos;
}
auto end = data.end();
if (pos + data.size() > offset + n) {
end -= (pos + data.size()) - (offset + n);
}
if (begin < end) {
size_t bytes_to_copy = end - begin;
memcpy(&buffer[total_bytes_transferred], &*begin, bytes_to_copy);
total_bytes_transferred += bytes_to_copy;
}
if (data.size() < block_size_) {
break;
}
}
*bytes_transferred = total_bytes_transferred;
return OkStatus();
}
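// Returns true when `file_signature` matches the cached signature (or when no
// signature was recorded yet); on a mismatch, evicts the file's blocks and
// stores the new signature.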
bool RamFileBlockCache::ValidateAndUpdateFileSignature(const string& filename,
int64_t file_signature) {
mutex_lock lock(mu_);
auto it = file_signature_map_.find(filename);
if (it != file_signature_map_.end()) {
if (it->second == file_signature) {
return true;
}
RemoveFile_Locked(filename);
it->second = file_signature;
return false;
}
file_signature_map_[filename] = file_signature;
return true;
}
size_t RamFileBlockCache::CacheSize() const {
mutex_lock lock(mu_);
return cache_size_;
}
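// Body of the pruning thread: roughly once a second, removes every file whose
// least-recently-added block is older than max_staleness_, until asked to
// stop.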
void RamFileBlockCache::Prune() {
while (!WaitForNotificationWithTimeout(&stop_pruning_thread_, 1000000)) {
mutex_lock lock(mu_);
uint64 now = env_->NowSeconds();
while (!lra_list_.empty()) {
auto it = block_map_.find(lra_list_.back());
if (now - it->second->timestamp <= max_staleness_) {
break;
}
RemoveFile_Locked(std::string(it->first.first));
}
}
}
void RamFileBlockCache::Flush() {
mutex_lock lock(mu_);
block_map_.clear();
lru_list_.clear();
lra_list_.clear();
cache_size_ = 0;
}
void RamFileBlockCache::RemoveFile(const string& filename) {
mutex_lock lock(mu_);
RemoveFile_Locked(filename);
}
void RamFileBlockCache::RemoveFile_Locked(const string& filename) {
Key begin = std::make_pair(filename, 0);
auto it = block_map_.lower_bound(begin);
while (it != block_map_.end() && it->first.first == filename) {
auto next = std::next(it);
RemoveBlock(it);
it = next;
}
}
void RamFileBlockCache::RemoveBlock(BlockMap::iterator entry) {
entry->second->timestamp = 0;
lru_list_.erase(entry->second->lru_iterator);
lra_list_.erase(entry->second->lra_iterator);
cache_size_ -= entry->second->data.capacity();
block_map_.erase(entry);
}
} | #include "tsl/platform/cloud/ram_file_block_cache.h"
#include <cstring>
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/cloud/now_seconds_env.h"
#include "tsl/platform/env.h"
#include "tsl/platform/notification.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
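// Test helper: reads [offset, offset + n) through `cache` into `out`,
// shrinking `out` to the number of bytes actually transferred.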
Status ReadCache(RamFileBlockCache* cache, const string& filename,
size_t offset, size_t n, std::vector<char>* out) {
out->clear();
out->resize(n, 0);
size_t bytes_transferred = 0;
Status status =
cache->Read(filename, offset, n, out->data(), &bytes_transferred);
EXPECT_LE(bytes_transferred, n);
out->resize(bytes_transferred, n);
return status;
}
TEST(RamFileBlockCacheTest, IsCacheEnabled) {
auto fetcher = [](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
return OkStatus();
};
RamFileBlockCache cache1(0, 0, 0, fetcher);
RamFileBlockCache cache2(16, 0, 0, fetcher);
RamFileBlockCache cache3(0, 32, 0, fetcher);
RamFileBlockCache cache4(16, 32, 0, fetcher);
EXPECT_FALSE(cache1.IsCacheEnabled());
EXPECT_FALSE(cache2.IsCacheEnabled());
EXPECT_FALSE(cache3.IsCacheEnabled());
EXPECT_TRUE(cache4.IsCacheEnabled());
}
TEST(RamFileBlockCacheTest, ValidateAndUpdateFileSignature) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
calls++;
memset(buffer, 'x', n);
*bytes_transferred = n;
return OkStatus();
};
string filename = "file";
RamFileBlockCache cache(16, 32, 0, fetcher);
std::vector<char> out;
EXPECT_TRUE(cache.ValidateAndUpdateFileSignature(filename, 123));
TF_EXPECT_OK(ReadCache(&cache, filename, 0, 16, &out));
EXPECT_EQ(calls, 1);
EXPECT_TRUE(cache.ValidateAndUpdateFileSignature(filename, 123));
TF_EXPECT_OK(ReadCache(&cache, filename, 0, 16, &out));
EXPECT_EQ(calls, 1);
EXPECT_FALSE(cache.ValidateAndUpdateFileSignature(filename, 321));
TF_EXPECT_OK(ReadCache(&cache, filename, 0, 16, &out));
EXPECT_EQ(calls, 2);
}
TEST(RamFileBlockCacheTest, PassThrough) {
const string want_filename = "foo/bar";
const size_t want_offset = 42;
const size_t want_n = 1024;
int calls = 0;
auto fetcher = [&calls, want_filename, want_offset, want_n](
const string& got_filename, size_t got_offset,
size_t got_n, char* buffer, size_t* bytes_transferred) {
EXPECT_EQ(got_filename, want_filename);
EXPECT_EQ(got_offset, want_offset);
EXPECT_EQ(got_n, want_n);
calls++;
memset(buffer, 'x', got_n);
*bytes_transferred = got_n;
return OkStatus();
};
RamFileBlockCache cache1(1, 0, 0, fetcher);
RamFileBlockCache cache2(0, 1, 0, fetcher);
RamFileBlockCache cache3(0, 0, 0, fetcher);
RamFileBlockCache cache4(1000, 1000, 0, fetcher);
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache1, want_filename, want_offset, want_n, &out));
EXPECT_EQ(calls, 1);
TF_EXPECT_OK(ReadCache(&cache2, want_filename, want_offset, want_n, &out));
EXPECT_EQ(calls, 2);
TF_EXPECT_OK(ReadCache(&cache3, want_filename, want_offset, want_n, &out));
EXPECT_EQ(calls, 3);
TF_EXPECT_OK(ReadCache(&cache4, want_filename, want_offset, want_n, &out));
EXPECT_EQ(calls, 4);
}
TEST(RamFileBlockCacheTest, BlockAlignment) {
const size_t size = 256;
std::vector<char> buf;
for (int i = 0; i < size; i++) {
buf.push_back(i);
}
auto fetcher = [&buf](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
if (offset < buf.size()) {
size_t bytes_to_copy = std::min<size_t>(buf.size() - offset, n);
memcpy(buffer, buf.data() + offset, bytes_to_copy);
*bytes_transferred = bytes_to_copy;
} else {
*bytes_transferred = 0;
}
return OkStatus();
};
for (size_t block_size = 2; block_size <= 4; block_size++) {
RamFileBlockCache cache(block_size, block_size, 0, fetcher);
for (size_t offset = 0; offset < 10; offset++) {
for (size_t n = block_size - 2; n <= block_size + 2; n++) {
std::vector<char> got;
TF_EXPECT_OK(ReadCache(&cache, "", offset, n, &got));
if (offset + n <= size) {
EXPECT_EQ(got.size(), n) << "block size = " << block_size
<< ", offset = " << offset << ", n = " << n;
} else {
EXPECT_EQ(got.size(), size - offset)
<< "block size = " << block_size << ", offset = " << offset
<< ", n = " << n;
}
std::vector<char>::const_iterator begin = buf.begin() + offset;
std::vector<char>::const_iterator end =
offset + n > buf.size() ? buf.end() : begin + n;
std::vector<char> want(begin, end);
EXPECT_EQ(got, want) << "block size = " << block_size
<< ", offset = " << offset << ", n = " << n;
}
}
}
}
TEST(RamFileBlockCacheTest, CacheHits) {
const size_t block_size = 16;
std::set<size_t> calls;
auto fetcher = [&calls, block_size](const string& filename, size_t offset,
size_t n, char* buffer,
size_t* bytes_transferred) {
EXPECT_EQ(n, block_size);
EXPECT_EQ(offset % block_size, 0);
EXPECT_EQ(calls.find(offset), calls.end()) << "at offset " << offset;
calls.insert(offset);
memset(buffer, 'x', n);
*bytes_transferred = n;
return OkStatus();
};
const uint32 block_count = 256;
RamFileBlockCache cache(block_size, block_count * block_size, 0, fetcher);
std::vector<char> out;
out.resize(block_count, 0);
for (int i = 0; i < 2; i++) {
for (int j = 0; j < block_count; j++) {
TF_EXPECT_OK(ReadCache(&cache, "", block_size * j, block_size, &out));
}
}
}
TEST(RamFileBlockCacheTest, OutOfRange) {
const size_t block_size = 16;
const size_t file_size = 24;
bool first_block = false;
bool second_block = false;
auto fetcher = [block_size, file_size, &first_block, &second_block](
const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
EXPECT_EQ(n, block_size);
EXPECT_EQ(offset % block_size, 0);
size_t bytes_to_copy = 0;
if (offset == 0) {
memset(buffer, 'x', n);
bytes_to_copy = n;
first_block = true;
} else if (offset == block_size) {
bytes_to_copy = file_size - block_size;
memset(buffer, 'x', bytes_to_copy);
second_block = true;
}
*bytes_transferred = bytes_to_copy;
return OkStatus();
};
RamFileBlockCache cache(block_size, block_size, 0, fetcher);
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", 0, block_size, &out));
EXPECT_TRUE(first_block);
EXPECT_EQ(out.size(), block_size);
Status status = ReadCache(&cache, "", file_size + 4, 4, &out);
EXPECT_EQ(status.code(), error::OUT_OF_RANGE);
EXPECT_TRUE(second_block);
second_block = false;
TF_EXPECT_OK(ReadCache(&cache, "", block_size, block_size, &out));
EXPECT_FALSE(second_block);
EXPECT_EQ(out.size(), file_size - block_size);
}
TEST(RamFileBlockCacheTest, Inconsistent) {
const size_t block_size = 16;
auto fetcher = [block_size](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
EXPECT_EQ(n, block_size);
EXPECT_EQ(offset % block_size, 0);
EXPECT_GE(n, 1);
memset(buffer, 'x', 1);
*bytes_transferred = 1;
return OkStatus();
};
RamFileBlockCache cache(block_size, 2 * block_size, 0, fetcher);
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", block_size, block_size, &out));
EXPECT_EQ(out.size(), 1);
Status status = ReadCache(&cache, "", 0, block_size, &out);
EXPECT_EQ(status.code(), error::INTERNAL);
}
TEST(RamFileBlockCacheTest, LRU) {
const size_t block_size = 16;
std::list<size_t> calls;
auto fetcher = [&calls, block_size](const string& filename, size_t offset,
size_t n, char* buffer,
size_t* bytes_transferred) {
EXPECT_EQ(n, block_size);
EXPECT_FALSE(calls.empty()) << "at offset = " << offset;
if (!calls.empty()) {
EXPECT_EQ(offset, calls.front());
calls.pop_front();
}
memset(buffer, 'x', n);
*bytes_transferred = n;
return OkStatus();
};
const uint32 block_count = 2;
RamFileBlockCache cache(block_size, block_count * block_size, 0, fetcher);
std::vector<char> out;
calls.push_back(0);
TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out));
calls.push_back(block_size);
TF_EXPECT_OK(ReadCache(&cache, "", block_size, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "", block_size, 1, &out));
calls.push_back(2 * block_size);
TF_EXPECT_OK(ReadCache(&cache, "", 2 * block_size, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "", 2 * block_size, 1, &out));
calls.push_back(0);
TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "", 2 * block_size, 1, &out));
calls.push_back(block_size);
TF_EXPECT_OK(ReadCache(&cache, "", block_size, 1, &out));
calls.push_back(0);
TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out));
}
TEST(RamFileBlockCacheTest, MaxStaleness) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
calls++;
memset(buffer, 'x', n);
*bytes_transferred = n;
return OkStatus();
};
std::vector<char> out;
std::unique_ptr<NowSecondsEnv> env(new NowSecondsEnv);
RamFileBlockCache cache1(8, 16, 2 , fetcher, env.get());
TF_EXPECT_OK(ReadCache(&cache1, "", 0, 1, &out));
EXPECT_EQ(calls, 1);
for (int i = 1; i <= 10; i++) {
env->SetNowSeconds(i + 1);
TF_EXPECT_OK(ReadCache(&cache1, "", 0, 1, &out));
EXPECT_EQ(calls, 1 + i / 3);
}
calls = 0;
env->SetNowSeconds(0);
RamFileBlockCache cache2(8, 16, 0 , fetcher, env.get());
TF_EXPECT_OK(ReadCache(&cache2, "", 0, 1, &out));
EXPECT_EQ(calls, 1);
env->SetNowSeconds(365 * 24 * 60 * 60);
TF_EXPECT_OK(ReadCache(&cache2, "", 0, 1, &out));
EXPECT_EQ(calls, 1);
}
TEST(RamFileBlockCacheTest, RemoveFile) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
calls++;
char c = (filename == "a") ? 'a' : (filename == "b") ? 'b' : 'x';
if (offset > 0) {
c = toupper(c);
}
memset(buffer, c, n);
*bytes_transferred = n;
return OkStatus();
};
const size_t n = 3;
RamFileBlockCache cache(8, 32, 0, fetcher);
std::vector<char> out;
std::vector<char> a(n, 'a');
std::vector<char> b(n, 'b');
std::vector<char> A(n, 'A');
std::vector<char> B(n, 'B');
TF_EXPECT_OK(ReadCache(&cache, "a", 0, n, &out));
EXPECT_EQ(out, a);
EXPECT_EQ(calls, 1);
TF_EXPECT_OK(ReadCache(&cache, "a", 8, n, &out));
EXPECT_EQ(out, A);
EXPECT_EQ(calls, 2);
TF_EXPECT_OK(ReadCache(&cache, "b", 0, n, &out));
EXPECT_EQ(out, b);
EXPECT_EQ(calls, 3);
TF_EXPECT_OK(ReadCache(&cache, "b", 8, n, &out));
EXPECT_EQ(out, B);
EXPECT_EQ(calls, 4);
TF_EXPECT_OK(ReadCache(&cache, "a", 0, n, &out));
EXPECT_EQ(out, a);
TF_EXPECT_OK(ReadCache(&cache, "a", 8, n, &out));
EXPECT_EQ(out, A);
TF_EXPECT_OK(ReadCache(&cache, "b", 0, n, &out));
EXPECT_EQ(out, b);
TF_EXPECT_OK(ReadCache(&cache, "b", 8, n, &out));
EXPECT_EQ(out, B);
EXPECT_EQ(calls, 4);
cache.RemoveFile("a");
TF_EXPECT_OK(ReadCache(&cache, "b", 0, n, &out));
EXPECT_EQ(out, b);
TF_EXPECT_OK(ReadCache(&cache, "b", 8, n, &out));
EXPECT_EQ(out, B);
EXPECT_EQ(calls, 4);
TF_EXPECT_OK(ReadCache(&cache, "a", 0, n, &out));
EXPECT_EQ(out, a);
EXPECT_EQ(calls, 5);
TF_EXPECT_OK(ReadCache(&cache, "a", 8, n, &out));
EXPECT_EQ(out, A);
EXPECT_EQ(calls, 6);
}
TEST(RamFileBlockCacheTest, Prune) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
calls++;
memset(buffer, 'x', n);
*bytes_transferred = n;
return OkStatus();
};
std::vector<char> out;
std::unique_ptr<NowSecondsEnv> env(new NowSecondsEnv);
uint64 now = Env::Default()->NowSeconds();
env->SetNowSeconds(now);
RamFileBlockCache cache(8, 32, 1 , fetcher, env.get());
TF_EXPECT_OK(ReadCache(&cache, "a", 0, 1, &out));
env->SetNowSeconds(now + 1);
TF_EXPECT_OK(ReadCache(&cache, "b", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "a", 8, 1, &out));
EXPECT_EQ(cache.CacheSize(), 24);
EXPECT_EQ(calls, 3);
TF_EXPECT_OK(ReadCache(&cache, "a", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "b", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "a", 8, 1, &out));
EXPECT_EQ(calls, 3);
env->SetNowSeconds(now + 2);
uint64 start = Env::Default()->NowSeconds();
do {
Env::Default()->SleepForMicroseconds(100000);
} while (cache.CacheSize() == 24 && Env::Default()->NowSeconds() - start < 3);
EXPECT_EQ(cache.CacheSize(), 8);
TF_EXPECT_OK(ReadCache(&cache, "b", 0, 1, &out));
EXPECT_EQ(calls, 3);
env->SetNowSeconds(now + 3);
start = Env::Default()->NowSeconds();
do {
Env::Default()->SleepForMicroseconds(100000);
} while (cache.CacheSize() == 8 && Env::Default()->NowSeconds() - start < 3);
EXPECT_EQ(cache.CacheSize(), 0);
}
TEST(RamFileBlockCacheTest, ParallelReads) {
const int callers = 4;
BlockingCounter counter(callers);
auto fetcher = [&counter](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
counter.DecrementCount();
if (!counter.WaitFor(std::chrono::seconds(10))) {
return errors::FailedPrecondition("desired concurrency not reached");
}
memset(buffer, 'x', n);
*bytes_transferred = n;
return OkStatus();
};
const int block_size = 8;
RamFileBlockCache cache(block_size, 2 * callers * block_size, 0, fetcher);
std::vector<std::unique_ptr<Thread>> threads;
for (int i = 0; i < callers; i++) {
threads.emplace_back(
Env::Default()->StartThread({}, "caller", [&cache, i, block_size]() {
std::vector<char> out;
TF_EXPECT_OK(
ReadCache(&cache, "a", i * block_size, block_size, &out));
std::vector<char> x(block_size, 'x');
EXPECT_EQ(out, x);
}));
}
}
TEST(RamFileBlockCacheTest, CoalesceConcurrentReads) {
const size_t block_size = 16;
int num_requests = 0;
Notification notification;
auto fetcher = [&num_requests, ¬ification, block_size](
const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
EXPECT_EQ(n, block_size);
EXPECT_EQ(offset, 0);
num_requests++;
memset(buffer, 'x', n);
*bytes_transferred = n;
notification.Notify();
Env::Default()->SleepForMicroseconds(100000);
return OkStatus();
};
RamFileBlockCache cache(block_size, block_size, 0, fetcher);
std::unique_ptr<Thread> concurrent(
Env::Default()->StartThread({}, "concurrent", [&cache, block_size] {
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", 0, block_size / 2, &out));
EXPECT_EQ(out.size(), block_size / 2);
}));
notification.WaitForNotification();
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", block_size / 2, block_size / 2, &out));
EXPECT_EQ(out.size(), block_size / 2);
EXPECT_EQ(1, num_requests);
}
TEST(RamFileBlockCacheTest, Flush) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, size_t* bytes_transferred) {
calls++;
memset(buffer, 'x', n);
*bytes_transferred = n;
return OkStatus();
};
RamFileBlockCache cache(16, 32, 0, fetcher);
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", 0, 16, &out));
TF_EXPECT_OK(ReadCache(&cache, "", 0, 16, &out));
EXPECT_EQ(calls, 1);
cache.Flush();
TF_EXPECT_OK(ReadCache(&cache, "", 0, 16, &out));
EXPECT_EQ(calls, 2);
}
}
} |
1,266 | cpp | tensorflow/tensorflow | gcs_filesystem | tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem.cc | tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_GCS_FILESYSTEM_H_
#define TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_GCS_FILESYSTEM_H_
#include "google/cloud/storage/client.h"
#include "tensorflow/c/experimental/filesystem/filesystem_interface.h"
#include "tensorflow/c/experimental/filesystem/plugins/gcs/expiring_lru_cache.h"
#include "tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.h"
#include "tensorflow/c/tf_status.h"
void ParseGCSPath(const std::string& fname, bool object_empty_ok,
std::string* bucket, std::string* object, TF_Status* status);
namespace tf_random_access_file {
void Cleanup(TF_RandomAccessFile* file);
int64_t Read(const TF_RandomAccessFile* file, uint64_t offset, size_t n,
char* buffer, TF_Status* status);
}
namespace tf_writable_file {
void Cleanup(TF_WritableFile* file);
void Append(const TF_WritableFile* file, const char* buffer, size_t n,
TF_Status* status);
int64_t Tell(const TF_WritableFile* file, TF_Status* status);
void Flush(const TF_WritableFile* file, TF_Status* status);
void Sync(const TF_WritableFile* file, TF_Status* status);
void Close(const TF_WritableFile* file, TF_Status* status);
}
namespace tf_read_only_memory_region {
void Cleanup(TF_ReadOnlyMemoryRegion* region);
const void* Data(const TF_ReadOnlyMemoryRegion* region);
uint64_t Length(const TF_ReadOnlyMemoryRegion* region);
}
namespace tf_gcs_filesystem {
typedef struct GcsFileStat {
TF_FileStatistics base;
int64_t generation_number;
} GcsFileStat;
typedef struct GCSFile {
google::cloud::storage::Client gcs_client;
bool compose;
absl::Mutex block_cache_lock;
std::shared_ptr<RamFileBlockCache> file_block_cache
ABSL_GUARDED_BY(block_cache_lock);
uint64_t block_size;
std::unique_ptr<ExpiringLRUCache<GcsFileStat>> stat_cache;
GCSFile(google::cloud::storage::Client&& gcs_client);
GCSFile(google::cloud::storage::Client&& gcs_client, bool compose,
uint64_t block_size, size_t max_bytes, uint64_t max_staleness,
uint64_t stat_cache_max_age, size_t stat_cache_max_entries);
} GCSFile;
void InitTest(TF_Filesystem* filesystem, bool compose, uint64_t block_size,
size_t max_bytes, uint64_t max_staleness,
uint64_t stat_cache_max_age, size_t stat_cache_max_entries,
TF_Status* status);
void Init(TF_Filesystem* filesystem, TF_Status* status);
void Cleanup(TF_Filesystem* filesystem);
void NewRandomAccessFile(const TF_Filesystem* filesystem, const char* path,
TF_RandomAccessFile* file, TF_Status* status);
void NewWritableFile(const TF_Filesystem* filesystem, const char* path,
TF_WritableFile* file, TF_Status* status);
void NewAppendableFile(const TF_Filesystem* filesystem, const char* path,
TF_WritableFile* file, TF_Status* status);
void NewReadOnlyMemoryRegionFromFile(const TF_Filesystem* filesystem,
const char* path,
TF_ReadOnlyMemoryRegion* region,
TF_Status* status);
int64_t GetFileSize(const TF_Filesystem* filesystem, const char* path,
TF_Status* status);
void PathExists(const TF_Filesystem* filesystem, const char* path,
TF_Status* status);
void CreateDir(const TF_Filesystem* filesystem, const char* path,
TF_Status* status);
int GetChildren(const TF_Filesystem* filesystem, const char* path,
char*** entries, TF_Status* status);
void DeleteFile(const TF_Filesystem* filesystem, const char* path,
TF_Status* status);
void Stat(const TF_Filesystem* filesystem, const char* path,
TF_FileStatistics* stats, TF_Status* status);
void DeleteDir(const TF_Filesystem* filesystem, const char* path,
TF_Status* status);
void CopyFile(const TF_Filesystem* filesystem, const char* src, const char* dst,
TF_Status* status);
void RenameFile(const TF_Filesystem* filesystem, const char* src,
const char* dst, TF_Status* status);
}
#endif
#include "tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem.h"
#include <stdlib.h>
#include <string.h>
#include <variant>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/types/variant.h"
#include "google/cloud/storage/client.h"
#include "tensorflow/c/env.h"
#include "tensorflow/c/experimental/filesystem/plugins/gcs/gcs_helper.h"
#include "tensorflow/c/logging.h"
#include "tensorflow/c/tf_status.h"
namespace gcs = google::cloud::storage;
constexpr char kBlockSize[] = "GCS_READ_CACHE_BLOCK_SIZE_MB";
constexpr size_t kDefaultBlockSize = 64 * 1024 * 1024;
constexpr char kMaxCacheSize[] = "GCS_READ_CACHE_MAX_SIZE_MB";
constexpr size_t kDefaultMaxCacheSize = 0;
constexpr char kMaxStaleness[] = "GCS_READ_CACHE_MAX_STALENESS";
constexpr uint64_t kDefaultMaxStaleness = 0;
constexpr char kStatCacheMaxAge[] = "GCS_STAT_CACHE_MAX_AGE";
constexpr uint64_t kStatCacheDefaultMaxAge = 5;
constexpr char kStatCacheMaxEntries[] = "GCS_STAT_CACHE_MAX_ENTRIES";
constexpr size_t kStatCacheDefaultMaxEntries = 1024;
constexpr char kAppendMode[] = "GCS_APPEND_MODE";
constexpr char kComposeAppend[] = "compose";
static inline void TF_SetStatusFromGCSStatus(
const google::cloud::Status& gcs_status, TF_Status* status) {
TF_SetStatus(status, static_cast<TF_Code>(gcs_status.code()),
gcs_status.message().c_str());
}
static void* plugin_memory_allocate(size_t size) { return calloc(1, size); }
static void plugin_memory_free(void* ptr) { free(ptr); }
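// Splits a GCS path of the form "gs://bucket/object" into its bucket and
// object components. When `object_empty_ok` is false, a path that names only
// a bucket (e.g. "gs://bucket/") is rejected with TF_INVALID_ARGUMENT.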
void ParseGCSPath(const std::string& fname, bool object_empty_ok,
std::string* bucket, std::string* object, TF_Status* status) {
  size_t scheme_end = fname.find("://") + 2;
  if (fname.substr(0, scheme_end + 1) != "gs://") {
    TF_SetStatus(status, TF_INVALID_ARGUMENT,
                 "GCS path doesn't start with 'gs://'.");
return;
}
size_t bucket_end = fname.find('/', scheme_end + 1);
if (bucket_end == std::string::npos) {
TF_SetStatus(status, TF_INVALID_ARGUMENT,
"GCS path doesn't contain a bucket name.");
return;
}
*bucket = fname.substr(scheme_end + 1, bucket_end - scheme_end - 1);
*object = fname.substr(bucket_end + 1);
if (object->empty() && !object_empty_ok) {
TF_SetStatus(status, TF_INVALID_ARGUMENT,
"GCS path doesn't contain an object name.");
}
}
static void MaybeAppendSlash(std::string* name) {
if (name->empty())
*name = "/";
else if (name->back() != '/')
name->push_back('/');
}
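// Downloads up to `buffer_size` bytes of `path` starting at `offset` into
// `buffer` and returns the number of bytes actually read. On a short read,
// the cached stat (if present) is consulted: if the object is known to be
// longer than what was returned, the read is reported as TF_INTERNAL.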
static int64_t LoadBufferFromGCS(const std::string& path, size_t offset,
size_t buffer_size, char* buffer,
tf_gcs_filesystem::GCSFile* gcs_file,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return -1;
auto stream = gcs_file->gcs_client.ReadObject(
bucket, object, gcs::ReadRange(offset, offset + buffer_size));
TF_SetStatusFromGCSStatus(stream.status(), status);
if ((TF_GetCode(status) != TF_OK) &&
(TF_GetCode(status) != TF_OUT_OF_RANGE)) {
return -1;
}
int64_t read;
auto content_length = stream.headers().find("content-length");
if (content_length == stream.headers().end()) {
read = 0;
} else if (!absl::SimpleAtoi(content_length->second, &read)) {
TF_SetStatus(status, TF_UNKNOWN, "Could not get content-length header");
return -1;
}
TF_SetStatus(status, TF_OK, "");
TF_VLog(1, "Successful read of %s @ %u of size: %u", path.c_str(), offset,
read);
stream.read(buffer, read);
read = stream.gcount();
if (read < buffer_size) {
tf_gcs_filesystem::GcsFileStat stat;
if (gcs_file->stat_cache->Lookup(path, &stat)) {
if (offset + read < stat.base.length) {
TF_SetStatus(status, TF_INTERNAL,
absl::StrCat("File contents are inconsistent for file: ",
path, " @ ", offset)
.c_str());
}
TF_VLog(2, "Successful integrity check for: %s @ %u", path.c_str(),
offset);
}
}
return read;
}
namespace tf_random_access_file {
using ReadFn =
std::function<int64_t(const std::string& path, uint64_t offset, size_t n,
char* buffer, TF_Status* status)>;
typedef struct GCSFile {
const std::string path;
const bool is_cache_enable;
const uint64_t buffer_size;
ReadFn read_fn;
absl::Mutex buffer_mutex;
uint64_t buffer_start ABSL_GUARDED_BY(buffer_mutex);
bool buffer_end_is_past_eof ABSL_GUARDED_BY(buffer_mutex);
std::string buffer ABSL_GUARDED_BY(buffer_mutex);
GCSFile(std::string path, bool is_cache_enable, uint64_t buffer_size,
ReadFn read_fn)
: path(path),
is_cache_enable(is_cache_enable),
buffer_size(buffer_size),
read_fn(std::move(read_fn)),
buffer_mutex(),
buffer_start(0),
buffer_end_is_past_eof(false),
buffer() {}
} GCSFile;
void Cleanup(TF_RandomAccessFile* file) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
delete gcs_file;
}
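// Serves reads either straight from GCS (when the block cache is enabled or
// the request exceeds the read-ahead buffer) or from a per-file read-ahead
// buffer that is refilled on a miss. Returns the number of bytes copied into
// `buffer` and sets TF_OUT_OF_RANGE on a short read.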
int64_t Read(const TF_RandomAccessFile* file, uint64_t offset, size_t n,
char* buffer, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
if (gcs_file->is_cache_enable || n > gcs_file->buffer_size) {
return gcs_file->read_fn(gcs_file->path, offset, n, buffer, status);
} else {
absl::MutexLock l(&gcs_file->buffer_mutex);
size_t buffer_end = gcs_file->buffer_start + gcs_file->buffer.size();
size_t copy_size = 0;
if (offset < buffer_end && gcs_file->buffer_start) {
copy_size = (std::min)(n, static_cast<size_t>(buffer_end - offset));
memcpy(buffer,
gcs_file->buffer.data() + (offset - gcs_file->buffer_start),
copy_size);
}
bool consumed_buffer_to_eof =
offset + copy_size >= buffer_end && gcs_file->buffer_end_is_past_eof;
if (copy_size < n && !consumed_buffer_to_eof) {
gcs_file->buffer_start = offset + copy_size;
gcs_file->buffer.resize(gcs_file->buffer_size);
auto read_fill_buffer = gcs_file->read_fn(
gcs_file->path, gcs_file->buffer_start, gcs_file->buffer_size,
&(gcs_file->buffer[0]), status);
gcs_file->buffer_end_is_past_eof =
(TF_GetCode(status) == TF_OUT_OF_RANGE);
if (read_fill_buffer >= 0) gcs_file->buffer.resize(read_fill_buffer);
if (TF_GetCode(status) != TF_OK &&
TF_GetCode(status) != TF_OUT_OF_RANGE) {
gcs_file->buffer.resize(0);
return -1;
}
size_t remaining_copy =
(std::min)(n - copy_size, gcs_file->buffer.size());
memcpy(buffer + copy_size, gcs_file->buffer.data(), remaining_copy);
copy_size += remaining_copy;
}
if (copy_size < n) {
gcs_file->buffer_end_is_past_eof = false;
TF_SetStatus(status, TF_OUT_OF_RANGE, "Read less bytes than requested");
return copy_size;
}
TF_SetStatus(status, TF_OK, "");
return copy_size;
}
}
}
namespace tf_writable_file {
typedef struct GCSFile {
const std::string bucket;
const std::string object;
gcs::Client* gcs_client;
TempFile outfile;
bool sync_need;
int64_t offset;
} GCSFile;
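// Uploads the local temporary file to GCS. When `*offset` is -1 or 0 the
// whole staged file is uploaded directly; otherwise the staged bytes are
// uploaded under a random temporary object and composed onto the existing
// object to emulate an append. `*offset` tracks how many bytes already live
// in the GCS object.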
static void SyncImpl(const std::string& bucket, const std::string& object,
int64_t* offset, TempFile* outfile,
gcs::Client* gcs_client, TF_Status* status) {
outfile->flush();
if (*offset == -1 || *offset == 0) {
auto metadata = gcs_client->UploadFile(outfile->getName(), bucket, object,
gcs::Fields("size"));
if (!metadata) {
TF_SetStatusFromGCSStatus(metadata.status(), status);
return;
}
if (*offset == 0) {
if (!outfile->truncate()) {
TF_SetStatus(status, TF_INTERNAL,
"Could not truncate internal temporary file.");
return;
}
*offset = static_cast<int64_t>(metadata->size());
}
outfile->clear();
outfile->seekp(0, std::ios::end);
TF_SetStatus(status, TF_OK, "");
} else {
std::string temporary_object =
gcs::CreateRandomPrefixName("tf_writable_file_gcs");
auto metadata = gcs_client->UploadFile(outfile->getName(), bucket,
temporary_object, gcs::Fields(""));
if (!metadata) {
TF_SetStatusFromGCSStatus(metadata.status(), status);
return;
}
TF_VLog(3, "AppendObject: gs:
temporary_object.c_str(), bucket.c_str(), object.c_str());
const std::vector<gcs::ComposeSourceObject> source_objects = {
{object, {}, {}}, {temporary_object, {}, {}}};
metadata = gcs_client->ComposeObject(bucket, source_objects, object,
gcs::Fields("size"));
if (!metadata) {
TF_SetStatusFromGCSStatus(metadata.status(), status);
return;
}
auto delete_status = gcs_client->DeleteObject(bucket, temporary_object);
if (!delete_status.ok()) {
TF_SetStatusFromGCSStatus(delete_status, status);
return;
}
if (!outfile->truncate()) {
TF_SetStatus(status, TF_INTERNAL,
"Could not truncate internal temporary file.");
return;
}
*offset = static_cast<int64_t>(metadata->size());
TF_SetStatus(status, TF_OK, "");
}
}
void Cleanup(TF_WritableFile* file) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
delete gcs_file;
}
void Append(const TF_WritableFile* file, const char* buffer, size_t n,
TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
if (!gcs_file->outfile.is_open()) {
TF_SetStatus(status, TF_FAILED_PRECONDITION,
"The internal temporary file is not writable.");
return;
}
TF_VLog(3, "Append: gs:
gcs_file->object.c_str(), n);
gcs_file->sync_need = true;
gcs_file->outfile.write(buffer, n);
if (!gcs_file->outfile)
TF_SetStatus(status, TF_INTERNAL,
"Could not append to the internal temporary file.");
else
TF_SetStatus(status, TF_OK, "");
}
int64_t Tell(const TF_WritableFile* file, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
int64_t position = int64_t(gcs_file->outfile.tellp());
if (position == -1)
TF_SetStatus(status, TF_INTERNAL,
"tellp on the internal temporary file failed");
else
TF_SetStatus(status, TF_OK, "");
return position == -1
? -1
: position + (gcs_file->offset == -1 ? 0 : gcs_file->offset);
}
void Flush(const TF_WritableFile* file, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
if (gcs_file->sync_need) {
TF_VLog(3, "Flush started: gs:
gcs_file->object.c_str());
if (!gcs_file->outfile) {
TF_SetStatus(status, TF_INTERNAL,
"Could not append to the internal temporary file.");
return;
}
SyncImpl(gcs_file->bucket, gcs_file->object, &gcs_file->offset,
&gcs_file->outfile, gcs_file->gcs_client, status);
TF_VLog(3, "Flush finished: gs:
gcs_file->object.c_str());
if (TF_GetCode(status) != TF_OK) return;
gcs_file->sync_need = false;
} else {
TF_SetStatus(status, TF_OK, "");
}
}
void Sync(const TF_WritableFile* file, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
TF_VLog(3, "Sync: gs:
gcs_file->object.c_str());
Flush(file, status);
}
void Close(const TF_WritableFile* file, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
TF_VLog(3, "Close: gs:
gcs_file->object.c_str());
if (gcs_file->sync_need) {
Flush(file, status);
}
gcs_file->outfile.close();
}
}
namespace tf_read_only_memory_region {
typedef struct GCSMemoryRegion {
const void* const address;
const uint64_t length;
} GCSMemoryRegion;
void Cleanup(TF_ReadOnlyMemoryRegion* region) {
auto r = static_cast<GCSMemoryRegion*>(region->plugin_memory_region);
plugin_memory_free(const_cast<void*>(r->address));
delete r;
}
const void* Data(const TF_ReadOnlyMemoryRegion* region) {
auto r = static_cast<GCSMemoryRegion*>(region->plugin_memory_region);
return r->address;
}
uint64_t Length(const TF_ReadOnlyMemoryRegion* region) {
auto r = static_cast<GCSMemoryRegion*>(region->plugin_memory_region);
return r->length;
}
}
namespace tf_gcs_filesystem {
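// Default-configured filesystem state: block-cache sizing, staleness, stat
// cache limits, and append mode are read from the GCS_* environment variables
// declared above, falling back to the compiled-in defaults when unset or
// unparsable.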
GCSFile::GCSFile(google::cloud::storage::Client&& gcs_client)
: gcs_client(gcs_client), block_cache_lock() {
const char* append_mode = std::getenv(kAppendMode);
  // The env var must be set to "compose" (kComposeAppend) for compose-based
  // appends; comparing against the variable's own name would never match a
  // meaningful setting.
  compose = (append_mode != nullptr) && (!strcmp(kComposeAppend, append_mode));
uint64_t value;
block_size = kDefaultBlockSize;
size_t max_bytes = kDefaultMaxCacheSize;
uint64_t max_staleness = kDefaultMaxStaleness;
const char* block_size_env = std::getenv(kBlockSize);
if (block_size_env && absl::SimpleAtoi(block_size_env, &value)) {
block_size = value * 1024 * 1024;
}
const char* max_bytes_env = std::getenv(kMaxCacheSize);
if (max_bytes_env && absl::SimpleAtoi(max_bytes_env, &value)) {
max_bytes = static_cast<size_t>(value * 1024 * 1024);
}
const char* max_staleness_env = std::getenv(kMaxStaleness);
if (max_staleness_env && absl::SimpleAtoi(max_staleness_env, &value)) {
max_staleness = value;
}
TF_VLog(1, "GCS cache max size = %u ; block size = %u ; max staleness = %u",
max_bytes, block_size, max_staleness);
file_block_cache = std::make_unique<RamFileBlockCache>(
block_size, max_bytes, max_staleness,
[this](const std::string& filename, size_t offset, size_t buffer_size,
char* buffer, TF_Status* status) {
return LoadBufferFromGCS(filename, offset, buffer_size, buffer, this,
status);
});
uint64_t stat_cache_max_age = kStatCacheDefaultMaxAge;
size_t stat_cache_max_entries = kStatCacheDefaultMaxEntries;
const char* stat_cache_max_age_env = std::getenv(kStatCacheMaxAge);
if (stat_cache_max_age_env &&
absl::SimpleAtoi(stat_cache_max_age_env, &value)) {
stat_cache_max_age = value;
}
const char* stat_cache_max_entries_env = std::getenv(kStatCacheMaxEntries);
if (stat_cache_max_entries_env &&
absl::SimpleAtoi(stat_cache_max_entries_env, &value)) {
stat_cache_max_entries = static_cast<size_t>(value);
}
stat_cache = std::make_unique<ExpiringLRUCache<GcsFileStat>>(
stat_cache_max_age, stat_cache_max_entries);
}
GCSFile::GCSFile(google::cloud::storage::Client&& gcs_client, bool compose,
uint64_t block_size, size_t max_bytes, uint64_t max_staleness,
uint64_t stat_cache_max_age, size_t stat_cache_max_entries)
: gcs_client(gcs_client),
compose(compose),
block_cache_lock(),
block_size(block_size) {
file_block_cache = std::make_unique<RamFileBlockCache>(
block_size, max_bytes, max_staleness,
[this](const std::string& filename, size_t offset, size_t buffer_size,
char* buffer, TF_Status* status) {
return LoadBufferFromGCS(filename, offset, buffer_size, buffer, this,
status);
});
stat_cache = std::make_unique<ExpiringLRUCache<GcsFileStat>>(
stat_cache_max_age, stat_cache_max_entries);
}
void InitTest(TF_Filesystem* filesystem, bool compose, uint64_t block_size,
size_t max_bytes, uint64_t max_staleness,
uint64_t stat_cache_max_age, size_t stat_cache_max_entries,
TF_Status* status) {
google::cloud::StatusOr<gcs::Client> client =
gcs::Client::CreateDefaultClient();
if (!client) {
TF_SetStatusFromGCSStatus(client.status(), status);
return;
}
filesystem->plugin_filesystem =
new GCSFile(std::move(client.value()), compose, block_size, max_bytes,
max_staleness, stat_cache_max_age, stat_cache_max_entries);
TF_SetStatus(status, TF_OK, "");
}
void Init(TF_Filesystem* filesystem, TF_Status* status) {
google::cloud::StatusOr<gcs::Client> client =
gcs::Client::CreateDefaultClient();
if (!client) {
TF_SetStatusFromGCSStatus(client.status(), status);
return;
}
filesystem->plugin_filesystem = new GCSFile(std::move(client.value()));
TF_SetStatus(status, TF_OK, "");
}
void Cleanup(TF_Filesystem* filesystem) {
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
delete gcs_file;
}
static void UncachedStatForObject(const std::string& bucket,
const std::string& object, GcsFileStat* stat,
gcs::Client* gcs_client, TF_Status* status) {
auto metadata = gcs_client->GetObjectMetadata(
bucket, object, gcs::Fields("generation,size,timeStorageClassUpdated"));
if (!metadata) return TF_SetStatusFromGCSStatus(metadata.status(), status);
stat->generation_number = metadata->generation();
stat->base.length = metadata->size();
stat->base.mtime_nsec =
metadata->time_storage_class_updated().time_since_epoch().count();
stat->base.is_directory = object.back() == '/';
TF_VLog(1,
"Stat of: gs:
bucket.c_str(), object.c_str(), stat->base.length,
stat->generation_number, stat->base.mtime_nsec);
return TF_SetStatus(status, TF_OK, "");
}
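// The returned file captures a read function that, when the block cache is
// enabled, first refreshes the cached stat and compares the object's
// generation number against the cache's file signature, so blocks cached for
// an overwritten object are discarded before reading.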
void NewRandomAccessFile(const TF_Filesystem* filesystem, const char* path,
TF_RandomAccessFile* file, TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
bool is_cache_enabled;
{
absl::MutexLock l(&gcs_file->block_cache_lock);
is_cache_enabled = gcs_file->file_block_cache->IsCacheEnabled();
}
auto read_fn = [gcs_file, is_cache_enabled, bucket, object](
const std::string& path, uint64_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
int64_t read = 0;
if (is_cache_enabled) {
absl::ReaderMutexLock l(&gcs_file->block_cache_lock);
GcsFileStat stat;
gcs_file->stat_cache->LookupOrCompute(
path, &stat,
[gcs_file, bucket, object](const std::string& path, GcsFileStat* stat,
TF_Status* status) {
UncachedStatForObject(bucket, object, stat, &gcs_file->gcs_client,
status);
},
status);
if (TF_GetCode(status) != TF_OK) return -1;
if (!gcs_file->file_block_cache->ValidateAndUpdateFileSignature(
path, stat.generation_number)) {
TF_VLog(
1,
"File signature has been changed. Refreshing the cache. Path: %s",
path.c_str());
}
read = gcs_file->file_block_cache->Read(path, offset, n, buffer, status);
} else {
read = LoadBufferFromGCS(path, offset, n, buffer, gcs_file, status);
}
if (TF_GetCode(status) != TF_OK) return -1;
if (read < n)
TF_SetStatus(status, TF_OUT_OF_RANGE, "Read less bytes than requested");
else
TF_SetStatus(status, TF_OK, "");
return read;
};
file->plugin_file = new tf_random_access_file::GCSFile(
std::move(path), is_cache_enabled, gcs_file->block_size, read_fn);
TF_SetStatus(status, TF_OK, "");
}
void NewWritableFile(const TF_Filesystem* filesystem, const char* path,
TF_WritableFile* file, TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
char* temp_file_name = TF_GetTempFileName("");
file->plugin_file = new tf_writable_file::GCSFile(
{std::move(bucket), std::move(object), &gcs_file->gcs_client,
TempFile(temp_file_name, std::ios::binary | std::ios::out), true,
(gcs_file->compose ? 0 : -1)});
free(temp_file_name);
TF_VLog(3, "GcsWritableFile: %s", path);
TF_SetStatus(status, TF_OK, "");
}
void NewAppendableFile(const TF_Filesystem* filesystem, const char* path,
TF_WritableFile* file, TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
char* temp_file_name_c_str = TF_GetTempFileName("");
std::string temp_file_name(temp_file_name_c_str);
free(temp_file_name_c_str);
if (!gcs_file->compose) {
auto gcs_status =
gcs_file->gcs_client.DownloadToFile(bucket, object, temp_file_name);
TF_SetStatusFromGCSStatus(gcs_status, status);
auto status_code = TF_GetCode(status);
if (status_code != TF_OK && status_code != TF_NOT_FOUND) return;
bool sync_need = (status_code == TF_NOT_FOUND);
file->plugin_file = new tf_writable_file::GCSFile(
{std::move(bucket), std::move(object), &gcs_file->gcs_client,
TempFile(temp_file_name, std::ios::binary | std::ios::app), sync_need,
-1});
} else {
auto metadata = gcs_file->gcs_client.GetObjectMetadata(bucket, object,
gcs::Fields("size"));
TF_SetStatusFromGCSStatus(metadata.status(), status);
if (TF_GetCode(status) == TF_OK) {
file->plugin_file = new tf_writable_file::GCSFile(
{std::move(bucket), std::move(object), &gcs_file->gcs_client,
TempFile(temp_file_name, std::ios::binary | std::ios::trunc), false,
static_cast<int64_t>(metadata->size())});
} else if (TF_GetCode(status) == TF_NOT_FOUND) {
file->plugin_file = new tf_writable_file::GCSFile(
{std::move(bucket), std::move(object), &gcs_file->gcs_client,
TempFile(temp_file_name, std::ios::binary | std::ios::trunc), true,
0});
} else {
return;
}
}
TF_VLog(3, "GcsWritableFile: %s with existing file %s", path,
temp_file_name.c_str());
TF_SetStatus(status, TF_OK, "");
}
void NewReadOnlyMemoryRegionFromFile(const TF_Filesystem* filesystem,
const char* path,
TF_ReadOnlyMemoryRegion* region,
TF_Status* status) { | #include "tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem.h"
#include <random>
#include "absl/strings/string_view.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/stacktrace_handler.h"
#include "tensorflow/core/platform/test.h"
#define ASSERT_TF_OK(x) ASSERT_EQ(TF_OK, TF_GetCode(x)) << TF_Message(x)
#define EXPECT_TF_OK(x) EXPECT_EQ(TF_OK, TF_GetCode(x)) << TF_Message(x)
static const char* content = "abcdefghijklmnopqrstuvwxyz1234567890";
static const absl::string_view content_view = content;
namespace gcs = google::cloud::storage;
static std::string InitializeTmpDir() {
const char* test_dir = getenv("GCS_TEST_TMPDIR");
if (test_dir != nullptr) {
std::string bucket, object;
TF_Status* status = TF_NewStatus();
ParseGCSPath(test_dir, true, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) {
TF_DeleteStatus(status);
return "";
}
TF_DeleteStatus(status);
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> distribution;
std::string rng_val = std::to_string(distribution(gen));
return tensorflow::io::JoinPath(std::string(test_dir), rng_val);
} else {
return "";
}
}
static std::string* GetTmpDir() {
static std::string tmp_dir = InitializeTmpDir();
if (tmp_dir == "")
return nullptr;
else
return &tmp_dir;
}
namespace tensorflow {
namespace {
class GCSFilesystemTest : public ::testing::Test {
public:
void SetUp() override {
root_dir_ = io::JoinPath(
*GetTmpDir(),
::testing::UnitTest::GetInstance()->current_test_info()->name());
status_ = TF_NewStatus();
filesystem_ = new TF_Filesystem;
filesystem_->plugin_filesystem = nullptr;
}
void TearDown() override {
TF_DeleteStatus(status_);
if (filesystem_->plugin_filesystem != nullptr)
tf_gcs_filesystem::Cleanup(filesystem_);
delete filesystem_;
}
std::string GetURIForPath(absl::string_view path) {
const std::string translated_name =
tensorflow::io::JoinPath(root_dir_, path);
return translated_name;
}
std::unique_ptr<TF_WritableFile, void (*)(TF_WritableFile* file)>
GetWriter() {
std::unique_ptr<TF_WritableFile, void (*)(TF_WritableFile * file)> writer(
new TF_WritableFile, [](TF_WritableFile* file) {
if (file != nullptr) {
if (file->plugin_file != nullptr) tf_writable_file::Cleanup(file);
delete file;
}
});
writer->plugin_file = nullptr;
return writer;
}
std::unique_ptr<TF_RandomAccessFile, void (*)(TF_RandomAccessFile* file)>
GetReader() {
std::unique_ptr<TF_RandomAccessFile, void (*)(TF_RandomAccessFile * file)>
reader(new TF_RandomAccessFile, [](TF_RandomAccessFile* file) {
if (file != nullptr) {
if (file->plugin_file != nullptr)
tf_random_access_file::Cleanup(file);
delete file;
}
});
reader->plugin_file = nullptr;
return reader;
}
void WriteString(const std::string& path, const std::string& content) {
auto writer = GetWriter();
tf_gcs_filesystem::NewWritableFile(filesystem_, path.c_str(), writer.get(),
status_);
if (TF_GetCode(status_) != TF_OK) return;
tf_writable_file::Append(writer.get(), content.c_str(), content.length(),
status_);
if (TF_GetCode(status_) != TF_OK) return;
tf_writable_file::Close(writer.get(), status_);
if (TF_GetCode(status_) != TF_OK) return;
}
std::string ReadAll(const std::string& path) {
auto reader = GetReader();
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(),
reader.get(), status_);
if (TF_GetCode(status_) != TF_OK) return "";
auto file_size =
tf_gcs_filesystem::GetFileSize(filesystem_, path.c_str(), status_);
if (TF_GetCode(status_) != TF_OK) return "";
std::string content;
content.resize(file_size);
auto read = tf_random_access_file::Read(reader.get(), 0, file_size,
&content[0], status_);
if (TF_GetCode(status_) != TF_OK) return "";
if (read >= 0) content.resize(read);
if (file_size != content.size())
TF_SetStatus(
status_, TF_DATA_LOSS,
std::string("expected " + std::to_string(file_size) + " got " +
std::to_string(content.size()) + " bytes")
.c_str());
return content;
}
protected:
TF_Filesystem* filesystem_;
TF_Status* status_;
private:
std::string root_dir_;
};
::testing::AssertionResult WriteToServer(const std::string& path, size_t offset,
size_t length, gcs::Client* gcs_client,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK)
return ::testing::AssertionFailure() << TF_Message(status);
auto writer = gcs_client->WriteObject(bucket, object);
writer.write(content + offset, length);
writer.Close();
if (writer.metadata()) {
return ::testing::AssertionSuccess();
} else {
return ::testing::AssertionFailure()
<< writer.metadata().status().message();
}
}
::testing::AssertionResult InsertObject(const std::string& path,
const std::string& content,
gcs::Client* gcs_client,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK)
return ::testing::AssertionFailure() << TF_Message(status);
auto metadata = gcs_client->InsertObject(bucket, object, content);
if (metadata)
return ::testing::AssertionSuccess();
else
return ::testing::AssertionFailure() << metadata.status().message();
}
::testing::AssertionResult CompareSubString(int64_t offset, size_t length,
absl::string_view result,
size_t read) {
if (length == read && content_view.substr(offset, length) ==
absl::string_view(result).substr(0, read))
return ::testing::AssertionSuccess();
else
return ::testing::AssertionFailure()
<< "Result: " << absl::string_view(result).substr(0, read)
<< " Read: " << read;
}
::testing::AssertionResult CompareWithServer(const std::string& path,
size_t offset, size_t length,
gcs::Client* gcs_client,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK)
return ::testing::AssertionFailure() << TF_Message(status);
auto reader = gcs_client->ReadObject(bucket, object);
if (!reader) {
return ::testing::AssertionFailure() << reader.status().message();
} else {
std::string content{std::istreambuf_iterator<char>{reader}, {}};
return CompareSubString(offset, length, content, content.length());
}
}
TEST_F(GCSFilesystemTest, ParseGCSPath) {
std::string bucket, object;
ParseGCSPath("gs:
ASSERT_TF_OK(status_);
ASSERT_EQ(bucket, "bucket");
ASSERT_EQ(object, "path/to/object");
ParseGCSPath("gs:
ASSERT_TF_OK(status_);
ASSERT_EQ(bucket, "bucket");
ParseGCSPath("bucket/path/to/object", false, &bucket, &object, status_);
ASSERT_EQ(TF_GetCode(status_), TF_INVALID_ARGUMENT);
ParseGCSPath("gs:
ASSERT_EQ(TF_GetCode(status_), TF_INVALID_ARGUMENT);
ParseGCSPath("gs:
ASSERT_EQ(TF_GetCode(status_), TF_INVALID_ARGUMENT);
}
TEST_F(GCSFilesystemTest, RandomAccessFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string filepath = GetURIForPath("a_file");
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, filepath.c_str(), file,
status_);
ASSERT_TF_OK(status_);
char* result = new char[content_view.length()];
int64_t read = tf_random_access_file::Read(file, 0, 1, result, status_);
ASSERT_EQ(read, -1) << "Read: " << read;
ASSERT_EQ(TF_GetCode(status_), TF_NOT_FOUND) << TF_Message(status_);
TF_SetStatus(status_, TF_OK, "");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(WriteToServer(filepath, 0, content_view.length(),
&gcs_file->gcs_client, status_));
read = tf_random_access_file::Read(file, 0, content_view.length(), result,
status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(CompareSubString(0, content_view.length(), result, read));
read = tf_random_access_file::Read(file, 0, 4, result, status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(CompareSubString(0, 4, result, read));
read = tf_random_access_file::Read(file, content_view.length() - 2, 4, result,
status_);
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
ASSERT_TRUE(CompareSubString(content_view.length() - 2, 2, result, read));
delete[] result;
tf_random_access_file::Cleanup(file);
delete file;
}
TEST_F(GCSFilesystemTest, WritableFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string filepath = GetURIForPath("a_file");
TF_WritableFile* file = new TF_WritableFile;
tf_gcs_filesystem::NewWritableFile(filesystem_, filepath.c_str(), file,
status_);
ASSERT_TF_OK(status_);
tf_writable_file::Append(file, content, 4, status_);
ASSERT_TF_OK(status_);
auto length = tf_writable_file::Tell(file, status_);
ASSERT_EQ(length, 4);
ASSERT_TF_OK(status_);
tf_writable_file::Flush(file, status_);
ASSERT_TF_OK(status_);
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(
CompareWithServer(filepath, 0, 4, &gcs_file->gcs_client, status_));
tf_writable_file::Append(file, content + 4, 4, status_);
ASSERT_TF_OK(status_);
length = tf_writable_file::Tell(file, status_);
ASSERT_EQ(length, 8);
ASSERT_TF_OK(status_);
tf_writable_file::Flush(file, status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(
CompareWithServer(filepath, 0, 8, &gcs_file->gcs_client, status_));
tf_writable_file::Close(file, status_);
ASSERT_TF_OK(status_);
tf_writable_file::Cleanup(file);
gcs_file->compose = true;
filepath = GetURIForPath("b_file");
tf_gcs_filesystem::NewWritableFile(filesystem_, filepath.c_str(), file,
status_);
ASSERT_TF_OK(status_);
tf_writable_file::Append(file, content, 4, status_);
ASSERT_TF_OK(status_);
length = tf_writable_file::Tell(file, status_);
ASSERT_EQ(length, 4);
ASSERT_TF_OK(status_);
tf_writable_file::Flush(file, status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(
CompareWithServer(filepath, 0, 4, &gcs_file->gcs_client, status_));
tf_writable_file::Append(file, content + 4, 4, status_);
ASSERT_TF_OK(status_);
length = tf_writable_file::Tell(file, status_);
ASSERT_EQ(length, 8);
ASSERT_TF_OK(status_);
tf_writable_file::Flush(file, status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(
CompareWithServer(filepath, 0, 8, &gcs_file->gcs_client, status_));
tf_writable_file::Close(file, status_);
ASSERT_TF_OK(status_);
tf_writable_file::Cleanup(file);
delete file;
}
TEST_F(GCSFilesystemTest, ReadOnlyMemoryRegion) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(WriteToServer(path, 0, 0, &gcs_file->gcs_client, status_));
TF_ReadOnlyMemoryRegion* region = new TF_ReadOnlyMemoryRegion;
tf_gcs_filesystem::NewReadOnlyMemoryRegionFromFile(filesystem_, path.c_str(),
region, status_);
ASSERT_EQ(TF_GetCode(status_), TF_INVALID_ARGUMENT) << TF_Message(status_);
TF_SetStatus(status_, TF_OK, "");
ASSERT_TRUE(WriteToServer(path, 0, content_view.length(),
&gcs_file->gcs_client, status_));
tf_gcs_filesystem::NewReadOnlyMemoryRegionFromFile(filesystem_, path.c_str(),
region, status_);
ASSERT_TF_OK(status_);
auto length = tf_read_only_memory_region::Length(region);
ASSERT_EQ(length, content_view.length());
auto data =
static_cast<const char*>(tf_read_only_memory_region::Data(region));
ASSERT_TRUE(CompareSubString(0, content_view.length(), data, length));
tf_read_only_memory_region::Cleanup(region);
delete region;
}
TEST_F(GCSFilesystemTest, PathExists) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string path = GetURIForPath("PathExists");
tf_gcs_filesystem::PathExists(filesystem_, path.c_str(), status_);
EXPECT_EQ(TF_NOT_FOUND, TF_GetCode(status_)) << TF_Message(status_);
TF_SetStatus(status_, TF_OK, "");
WriteString(path, "test");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::PathExists(filesystem_, path.c_str(), status_);
EXPECT_TF_OK(status_);
}
TEST_F(GCSFilesystemTest, GetChildren) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string base = GetURIForPath("GetChildren");
tf_gcs_filesystem::CreateDir(filesystem_, base.c_str(), status_);
EXPECT_TF_OK(status_);
const std::string file = io::JoinPath(base, "TestFile.csv");
WriteString(file, "test");
EXPECT_TF_OK(status_);
const std::string subdir = io::JoinPath(base, "SubDir");
tf_gcs_filesystem::CreateDir(filesystem_, subdir.c_str(), status_);
EXPECT_TF_OK(status_);
const std::string subfile = io::JoinPath(subdir, "TestSubFile.csv");
WriteString(subfile, "test");
EXPECT_TF_OK(status_);
char** entries;
auto num_entries = tf_gcs_filesystem::GetChildren(filesystem_, base.c_str(),
&entries, status_);
EXPECT_TF_OK(status_);
std::vector<std::string> childrens;
for (int i = 0; i < num_entries; ++i) {
childrens.push_back(entries[i]);
}
std::sort(childrens.begin(), childrens.end());
EXPECT_EQ(std::vector<string>({"SubDir/", "TestFile.csv"}), childrens);
}
TEST_F(GCSFilesystemTest, DeleteFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string path = GetURIForPath("DeleteFile");
WriteString(path, "test");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::DeleteFile(filesystem_, path.c_str(), status_);
EXPECT_TF_OK(status_);
tf_gcs_filesystem::PathExists(filesystem_, path.c_str(), status_);
EXPECT_EQ(TF_GetCode(status_), TF_NOT_FOUND);
}
TEST_F(GCSFilesystemTest, CreateDir) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string dir = GetURIForPath("CreateDir");
tf_gcs_filesystem::CreateDir(filesystem_, dir.c_str(), status_);
EXPECT_TF_OK(status_);
TF_FileStatistics stat;
tf_gcs_filesystem::Stat(filesystem_, dir.c_str(), &stat, status_);
EXPECT_TF_OK(status_);
EXPECT_TRUE(stat.is_directory);
}
TEST_F(GCSFilesystemTest, DeleteDir) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string dir = GetURIForPath("DeleteDir");
const std::string file = io::JoinPath(dir, "DeleteDirFile.csv");
WriteString(file, "test");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::DeleteDir(filesystem_, dir.c_str(), status_);
EXPECT_EQ(TF_GetCode(status_), TF_FAILED_PRECONDITION);
TF_SetStatus(status_, TF_OK, "");
tf_gcs_filesystem::DeleteFile(filesystem_, file.c_str(), status_);
EXPECT_TF_OK(status_);
tf_gcs_filesystem::DeleteDir(filesystem_, dir.c_str(), status_);
EXPECT_TF_OK(status_);
TF_FileStatistics stat;
tf_gcs_filesystem::Stat(filesystem_, dir.c_str(), &stat, status_);
EXPECT_EQ(TF_GetCode(status_), TF_NOT_FOUND) << TF_Message(status_);
}
TEST_F(GCSFilesystemTest, StatFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string path = GetURIForPath("StatFile");
WriteString(path, "test");
ASSERT_TF_OK(status_);
TF_FileStatistics stat;
tf_gcs_filesystem::Stat(filesystem_, path.c_str(), &stat, status_);
EXPECT_TF_OK(status_);
EXPECT_EQ(4, stat.length);
EXPECT_FALSE(stat.is_directory);
}
TEST_F(GCSFilesystemTest, RenameFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string src = GetURIForPath("RenameFileSrc");
const std::string dst = GetURIForPath("RenameFileDst");
WriteString(src, "test");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::RenameFile(filesystem_, src.c_str(), dst.c_str(), status_);
EXPECT_TF_OK(status_);
auto result = ReadAll(dst);
EXPECT_TF_OK(status_);
EXPECT_EQ("test", result);
}
TEST_F(GCSFilesystemTest, RenameFileOverwrite) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string src = GetURIForPath("RenameFileOverwriteSrc");
const std::string dst = GetURIForPath("RenameFileOverwriteDst");
WriteString(src, "test_old");
ASSERT_TF_OK(status_);
WriteString(dst, "test_new");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::PathExists(filesystem_, dst.c_str(), status_);
EXPECT_TF_OK(status_);
tf_gcs_filesystem::RenameFile(filesystem_, src.c_str(), dst.c_str(), status_);
EXPECT_TF_OK(status_);
auto result = ReadAll(dst);
EXPECT_TF_OK(status_);
EXPECT_EQ("test_old", result);
}
TEST_F(GCSFilesystemTest, NewRandomAccessFile_NoBlockCache) {
tf_gcs_filesystem::InitTest(filesystem_, false, 0, 0, 0, 0, 0, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(InsertObject(path, "0123456789", &gcs_file->gcs_client, status_));
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(), file,
status_);
ASSERT_TF_OK(status_);
std::string result;
result.resize(6);
int64_t read = tf_random_access_file::Read(file, 0, 6, &result[0], status_);
ASSERT_EQ(read, 6) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
ASSERT_EQ(result, "012345") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, 6, 6, &result[0], status_);
ASSERT_EQ(read, 4) << "Read: " << read << "\n";
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
result.resize(read);
ASSERT_EQ(result, "6789") << "Result: " << result << "\n";
}
TEST_F(GCSFilesystemTest, NewRandomAccessFile_Buffered) {
tf_gcs_filesystem::InitTest(filesystem_, false, 10, 0, 0, 0, 0, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(InsertObject(path, "0123456789", &gcs_file->gcs_client, status_));
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(), file,
status_);
ASSERT_TF_OK(status_);
std::string result;
result.resize(6);
int64_t read = tf_random_access_file::Read(file, 0, 6, &result[0], status_);
ASSERT_EQ(read, 6) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
ASSERT_EQ(result, "012345") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, 6, 6, &result[0], status_);
ASSERT_EQ(read, 4) << "Read: " << read << "\n";
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
result.resize(read);
ASSERT_EQ(result, "6789") << "Result: " << result << "\n";
}
TEST_F(GCSFilesystemTest, NewRandomAccessFile_Buffered_ReadAtEOF) {
tf_gcs_filesystem::InitTest(filesystem_, false, 10, 0, 0, 0, 0, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(InsertObject(path, "0123456789", &gcs_file->gcs_client, status_));
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(), file,
status_);
ASSERT_TF_OK(status_);
std::string result;
result.resize(10);
int64_t read = tf_random_access_file::Read(file, 0, result.length(),
&result[0], status_);
ASSERT_EQ(read, 10) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
ASSERT_EQ(result, "0123456789") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, result.length(), result.length(),
&result[0], status_);
ASSERT_EQ(read, 0) << "Read: " << read << "\n";
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
result.resize(read);
ASSERT_EQ(result, "") << "Result: " << result << "\n";
}
TEST_F(GCSFilesystemTest, NewRandomAccessFile_Buffered_CachedOutOfRange) {
tf_gcs_filesystem::InitTest(filesystem_, false, 10, 0, 0, 0, 0, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(InsertObject(path, "012345678", &gcs_file->gcs_client, status_));
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(), file,
status_);
ASSERT_TF_OK(status_);
std::string result;
result.resize(5);
int64_t read = tf_random_access_file::Read(file, 0, result.length(),
&result[0], status_);
ASSERT_EQ(read, 5) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
ASSERT_EQ(result, "01234") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, 4, result.length(), &result[0],
status_);
ASSERT_EQ(read, 5) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
result.resize(read);
ASSERT_EQ(result, "45678") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, 5, result.length(), &result[0],
status_);
ASSERT_EQ(read, 4) << "Read: " << read << "\n";
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
result.resize(read);
ASSERT_EQ(result, "5678") << "Result: " << result << "\n";
}
}
}
GTEST_API_ int main(int argc, char** argv) {
tensorflow::testing::InstallStacktraceHandler();
if (!GetTmpDir()) {
std::cerr << "Could not read GCS_TEST_TMPDIR env";
return -1;
}
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} |
1,267 | cpp | tensorflow/tensorflow | gradients | tensorflow/cc/framework/gradients.cc | tensorflow/cc/framework/gradients_test.cc | #ifndef TENSORFLOW_CC_FRAMEWORK_GRADIENTS_H_
#define TENSORFLOW_CC_FRAMEWORK_GRADIENTS_H_
#include <vector>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
namespace tensorflow {
Status AddSymbolicGradients(const Scope& scope,
const std::vector<Output>& outputs,
const std::vector<Output>& inputs,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs);
Status AddSymbolicGradients(const Scope& scope,
const std::vector<Output>& outputs,
const std::vector<Output>& inputs,
std::vector<Output>* grad_outputs);
Output NoGradient();
}
#endif
#include "tensorflow/cc/framework/gradients.h"
#include <deque>
#include <map>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/while_gradients.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/while_context.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
namespace {
struct OutputHash {
uint64 operator()(const Output& x) const { return x.hash(); }
};
struct OutputEq {
bool operator()(const Output& x, const Output& y) const {
return (x.node() == y.node()) && (x.index() == y.index());
}
};
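// Builds gradient nodes by walking the graph backwards from `outputs_` to
// `inputs_`. Each node waits until the gradients for all of its consumed
// outputs have arrived (tracked in `pending_`), sums them, and then invokes
// the registered gradient function to propagate along its input edges.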
class SymbolicGradientBuilder {
public:
SymbolicGradientBuilder(const Scope& scope,
const ops::GradOpRegistry* registry,
const std::vector<Output>& outputs,
const std::vector<Output>& inputs,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs);
Status AddGradients();
static Output NoGradient() { return Output(nullptr, -1); }
private:
Status Initialize();
Status BackpropAlongEdge(const Output& dst_grad, const Output& src);
Status SumGradients(const Output& src, Output* grad);
bool IsPrimitiveOpWithNoGrad(const string& opname);
Status CallGradFunction(const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs);
std::vector<bool> GetReachableNodes();
Status ProcessWhileLoop(Node* exit_node, const Output& summed_grads);
std::unordered_set<int> GetStopBackpropNodes(
const std::vector<bool>& reachable_nodes,
const std::unordered_set<int>& output_nodes) const;
const Scope& scope_;
const ops::GradOpRegistry* registry_;
const std::vector<Output>& outputs_;
const std::vector<Output>& inputs_;
const std::vector<Output>& grad_inputs_;
std::vector<Output>* grad_outputs_;
typedef std::vector<Output> BackproppedGradients;
std::unordered_map<Output, BackproppedGradients, OutputHash, OutputEq>
backprops_;
std::vector<int> pending_;
std::deque<Node*> ready_;
std::unordered_map<Output, int, OutputHash, OutputEq> input_nodes_;
std::map<WhileContext*, std::map<Node*, Output>> while_backprops_;
SymbolicGradientBuilder(const SymbolicGradientBuilder&) = delete;
void operator=(const SymbolicGradientBuilder&) = delete;
};
SymbolicGradientBuilder::SymbolicGradientBuilder(
const Scope& scope, const ops::GradOpRegistry* registry,
const std::vector<Output>& outputs, const std::vector<Output>& inputs,
const std::vector<Output>& grad_inputs, std::vector<Output>* grad_outputs)
: scope_(scope),
registry_(registry),
outputs_(outputs),
inputs_(inputs),
grad_inputs_(grad_inputs),
grad_outputs_(grad_outputs) {}
Status SymbolicGradientBuilder::BackpropAlongEdge(const Output& dst_grad,
const Output& src) {
if (src.node() == nullptr) {
return errors::Internal("Attempted to backprop along an invalid edge.");
}
auto iter = backprops_.find(src);
if (iter != backprops_.end()) {
auto* grads = &iter->second;
grads->push_back(dst_grad);
if (--pending_[src.node()->id()] == 0) {
ready_.push_back(src.node());
}
}
return absl::OkStatus();
}
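// Marks every node from which at least one of `outputs_` is reachable via a
// breadth-first walk over incoming data edges; gradients only need to flow
// through these nodes.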
std::vector<bool> SymbolicGradientBuilder::GetReachableNodes() {
std::vector<bool> reachable_nodes(scope_.graph()->num_node_ids(), false);
std::deque<Node*> queue;
for (const Output& out : outputs_) {
if (!reachable_nodes[out.node()->id()]) {
queue.push_back(out.node());
reachable_nodes[out.node()->id()] = true;
}
}
while (!queue.empty()) {
Node* n = queue.front();
queue.pop_front();
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) continue;
if (!reachable_nodes[e->src()->id()]) {
queue.push_back(e->src());
reachable_nodes[e->src()->id()] = true;
}
}
}
return reachable_nodes;
}
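// Computes the frontier at which backprop may stop: an output node is a stop
// node unless another requested output lies beyond it on a forward path from
// the inputs, in which case backprop must continue through it.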
std::unordered_set<int> SymbolicGradientBuilder::GetStopBackpropNodes(
const std::vector<bool>& reachable_nodes,
const std::unordered_set<int>& output_nodes) const {
std::unordered_set<int> internal_outputs;
std::unordered_set<Node*> visited;
std::deque<std::pair<Node*, Node*>> queue;
for (const Output& nout : inputs_) {
auto const& pair = visited.insert(nout.node());
if (pair.second) {
queue.push_back(std::make_pair(nout.node(), static_cast<Node*>(nullptr)));
}
}
while (!queue.empty()) {
std::pair<Node*, Node*> p = queue.front();
Node* n = p.first;
queue.pop_front();
for (const Edge* e : n->out_edges()) {
if (e->IsControlEdge() || !reachable_nodes[e->dst()->id()]) continue;
auto const& pair = visited.insert(e->dst());
if (pair.second) {
int node_id = e->dst()->id();
Node* last_output_node = p.second;
if (output_nodes.find(node_id) != output_nodes.end()) {
if (last_output_node != nullptr) {
internal_outputs.insert(last_output_node->id());
}
last_output_node = e->dst();
}
queue.push_back(std::make_pair(e->dst(), last_output_node));
}
}
}
std::unordered_set<int> stop_backprop_nodes;
for (int output_node : output_nodes) {
if (internal_outputs.find(output_node) == internal_outputs.end()) {
stop_backprop_nodes.insert(output_node);
}
}
return stop_backprop_nodes;
}
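// Validates the request and seeds the backprop state: `pending_[n]` counts
// how many gradient contributions node n still expects before it is ready to
// process, and the initial `grad_inputs_` are propagated onto `outputs_`.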
Status SymbolicGradientBuilder::Initialize() {
if (outputs_.size() != grad_inputs_.size()) {
return errors::InvalidArgument(
"Must specify a gradient input for each output.");
}
std::vector<bool> reachable_nodes = GetReachableNodes();
for (const Output& input : inputs_) {
if (!reachable_nodes[input.node()->id()]) {
return errors::InvalidArgument(
"Cannot compute the partial derivative for node '",
input.node()->name(),
"' as it's unreachable from the output node(s).");
}
}
grad_outputs_->clear();
grad_outputs_->resize(inputs_.size());
std::unordered_set<int> output_nodes;
output_nodes.reserve(outputs_.size());
for (size_t i = 0; i < outputs_.size(); ++i) {
output_nodes.insert(outputs_[i].node()->id());
}
std::unordered_set<int> stop_backprop_nodes =
GetStopBackpropNodes(reachable_nodes, output_nodes);
input_nodes_.reserve(inputs_.size());
for (size_t i = 0; i < inputs_.size(); ++i) {
input_nodes_.insert({inputs_[i], i});
}
pending_.resize(scope_.graph()->num_node_ids(), 0);
{
backprops_.clear();
std::unordered_set<Node*> visited;
std::deque<Node*> queue;
for (const Output& nout : inputs_) {
auto const& pair = visited.insert(nout.node());
if (pair.second) {
queue.push_back(nout.node());
}
}
while (!queue.empty()) {
Node* n = queue.front();
queue.pop_front();
for (int i = 0; i < n->num_outputs(); ++i) {
backprops_[{n, i}].clear();
}
int num_expected_backprops = 0;
if (stop_backprop_nodes.find(n->id()) == stop_backprop_nodes.end()) {
for (const Edge* e : n->out_edges()) {
if (e->IsControlEdge() || !reachable_nodes[e->dst()->id()]) continue;
auto const& pair = visited.insert(e->dst());
if (pair.second) {
queue.push_back(e->dst());
}
++num_expected_backprops;
}
}
if (output_nodes.find(n->id()) != output_nodes.end()) {
for (const Output& output : outputs_) {
if (output.node() == n) {
++num_expected_backprops;
}
}
}
pending_[n->id()] = num_expected_backprops;
}
}
{
const size_t num_dy = grad_inputs_.size();
for (size_t i = 0; i < num_dy; ++i) {
TF_RETURN_IF_ERROR(BackpropAlongEdge(grad_inputs_[i], outputs_[i]));
}
}
return absl::OkStatus();
}
Status SymbolicGradientBuilder::SumGradients(const Output& src, Output* grad) {
auto iter = backprops_.find(src);
if (iter == backprops_.end()) {
return errors::Internal("Unable to find backprop list for node.id ",
src.node()->name());
}
const auto& grads = iter->second;
std::vector<Output> grads_to_keep;
for (const Output& o : grads) {
if (o == NoGradient()) continue;
grads_to_keep.push_back(o);
}
if (grads_to_keep.empty()) {
*grad = NoGradient();
} else if (grads_to_keep.size() == 1) {
*grad = grads_to_keep[0];
} else {
*grad = ops::AddN(scope_, grads_to_keep);
}
return absl::OkStatus();
}
bool SymbolicGradientBuilder::IsPrimitiveOpWithNoGrad(const string& opname) {
ops::GradFunc grad_fn;
Status s = registry_->Lookup(opname, &grad_fn);
return s.ok() && (grad_fn == nullptr);
}
Status SymbolicGradientBuilder::CallGradFunction(
const Operation& op, const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
ops::GradFunc grad_fn;
TF_RETURN_IF_ERROR(registry_->Lookup(op.node()->type_string(), &grad_fn));
TF_RETURN_IF_ERROR(grad_fn(scope_, op, grad_inputs, grad_outputs));
TF_RETURN_IF_ERROR(scope_.status());
return absl::OkStatus();
}
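// Gradients for while loops are accumulated per WhileContext: only once every
// Exit node of the loop has received its summed gradient is a single gradient
// while loop constructed, with its results backpropped to the corresponding
// Enter nodes' inputs.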
Status SymbolicGradientBuilder::ProcessWhileLoop(Node* exit_node,
const Output& summed_grads) {
if (summed_grads == NoGradient()) {
return errors::Unimplemented(
"Missing gradient into while loop not yet implemented");
}
DCHECK(exit_node->IsExit());
WhileContext* while_ctx = exit_node->while_ctx();
DCHECK(while_ctx != nullptr);
std::map<Node*, Output>& backprops = while_backprops_[while_ctx];
DCHECK(backprops.find(exit_node) == backprops.end());
backprops[exit_node] = summed_grads;
if (backprops.size() < while_ctx->exit_nodes().size())
return absl::OkStatus();
Scope while_scope =
scope_.NewSubScope(strings::StrCat(while_ctx->frame_name(), "_grad"));
std::vector<Output> dy;
for (Node* n : while_ctx->exit_nodes()) dy.push_back(backprops[n]);
std::vector<Output> dx;
TF_RETURN_IF_ERROR(AddWhileLoopGradient(while_ctx, while_scope, dy, &dx));
DCHECK_EQ(dx.size(), while_ctx->enter_nodes().size());
for (int i = 0, end = dx.size(); i < end; ++i) {
Node* enter_node = while_ctx->enter_nodes()[i];
for (const Edge* e : enter_node->in_edges()) {
if (e->IsControlEdge()) continue;
TF_RETURN_IF_ERROR(BackpropAlongEdge(dx[i], {e->src(), e->src_output()}));
}
}
return absl::OkStatus();
}
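// Main driver: repeatedly pops a node whose pending count has reached zero,
// sums the gradients for each of its outputs, and then either records a
// requested gradient, defers to the while-loop handler, propagates
// NoGradient for ops without gradients, or calls the node's gradient
// function and backprops the results along its input edges.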
Status SymbolicGradientBuilder::AddGradients() {
TF_RETURN_IF_ERROR(Initialize());
std::vector<Output> dy;
while (!ready_.empty()) {
Node* n = ready_.front();
ready_.pop_front();
const int num_y = n->num_outputs();
dy.clear();
dy.resize(num_y, {nullptr, 0});
std::vector<int> no_grad_dy_indices;
for (int i = 0; i < num_y; ++i) {
TF_RETURN_IF_ERROR(SumGradients({n, i}, &dy[i]));
if (dy[i] == NoGradient()) {
no_grad_dy_indices.push_back(i);
}
auto iter = input_nodes_.find({n, i});
if (iter != input_nodes_.end()) {
(*grad_outputs_)[iter->second] = dy[i];
}
}
bool stop_node = true;
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) continue;
if (backprops_.find({e->src(), e->src_output()}) != backprops_.end()) {
stop_node = false;
break;
}
}
if (stop_node) {
continue;
}
if (n->IsExit()) {
DCHECK_EQ(dy.size(), 1);
TF_RETURN_IF_ERROR(ProcessWhileLoop(n, dy[0]));
continue;
}
DCHECK(!n->IsEnter() && !n->IsNextIteration()) << n->DebugString();
const int num_no_grad = no_grad_dy_indices.size();
if (IsPrimitiveOpWithNoGrad(n->type_string()) || num_no_grad == num_y) {
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) continue;
TF_RETURN_IF_ERROR(
BackpropAlongEdge(NoGradient(), {e->src(), e->src_output()}));
}
continue;
}
if (num_no_grad > 0 && num_no_grad < num_y) {
for (const int dy_index : no_grad_dy_indices) {
dy[dy_index] = ops::ZerosLike(scope_, Output(n, dy_index));
}
}
std::vector<Output> dx;
TF_RETURN_IF_ERROR(CallGradFunction(Operation(n), dy, &dx));
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) continue;
size_t dx_index = e->dst_input();
if (dx_index >= dx.size()) {
return errors::Internal("Invalid gradient output index: ", dx_index,
" size: ", dx.size());
}
TF_RETURN_IF_ERROR(
BackpropAlongEdge(dx[dx_index], {e->src(), e->src_output()}));
}
}
std::unordered_map<Node*, int> requested_grads;
for (const Output& nout : inputs_) {
if (pending_[nout.node()->id()] > 0) {
DCHECK_GT(nout.node()->num_outputs(), 1);
int idx = input_nodes_[nout];
DCHECK(((*grad_outputs_)[idx].node() == nullptr));
TF_RETURN_IF_ERROR(SumGradients(nout, &(*grad_outputs_)[idx]));
++requested_grads[nout.node()];
}
}
for (const auto& p : requested_grads) {
int num_requested_inputs = p.first->num_outputs() - pending_[p.first->id()];
CHECK_EQ(num_requested_inputs, p.second);
}
return absl::OkStatus();
}
}
Status AddSymbolicGradients(const Scope& scope,
const std::vector<Output>& outputs,
const std::vector<Output>& inputs,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
SymbolicGradientBuilder builder(scope, ops::GradOpRegistry::Global(), outputs,
inputs, grad_inputs, grad_outputs);
return builder.AddGradients();
}
Status AddSymbolicGradients(const Scope& scope,
const std::vector<Output>& outputs,
const std::vector<Output>& inputs,
std::vector<Output>* grad_outputs) {
std::vector<Output> grad_inputs;
grad_inputs.reserve(outputs.size());
for (const Output& output : outputs) {
grad_inputs.emplace_back(ops::OnesLike(scope, output));
}
return AddSymbolicGradients(scope, outputs, inputs, grad_inputs,
grad_outputs);
}
Output NoGradient() { return SymbolicGradientBuilder::NoGradient(); }
} | #include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
using ops::Assign;
using ops::Const;
using ops::Identity;
using ops::MatMul;
using ops::OnesLike;
using ops::Placeholder;
using ops::Square;
using ops::Stack;
using ops::StopGradient;
using ops::Unstack;
using ops::Variable;
class GradientsTest : public ::testing::Test {
protected:
GradientsTest()
: scope_expected_(Scope::NewRootScope()),
scope_test_(Scope::NewRootScope()) {}
void CompareTestAndExpectedGraphs() {
GraphDef gdef_test;
TF_ASSERT_OK(scope_test_.ToGraphDef(&gdef_test));
GraphDef gdef_exp;
TF_ASSERT_OK(scope_expected_.ToGraphDef(&gdef_exp));
TF_EXPECT_GRAPH_EQ(gdef_exp, gdef_test);
}
Scope scope_expected_;
Scope scope_test_;
};
TEST_F(GradientsTest, OneMatMul) {
for (const bool expected : {false, true}) {
const Scope& scope = expected ? scope_expected_ : scope_test_;
auto x = Const(scope, {{1.0, 2.0}, {3.0, 4.0}});
auto y = Const(scope, {{1.0, 0.0}, {0.0, 1.0}});
auto z = MatMul(scope, x, y);
TF_ASSERT_OK(scope.status());
CHECK_NOTNULL(z.node());
if (expected) {
auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
auto dx = MatMul(scope, dz, y, MatMul::TransposeB(true));
auto dy = MatMul(scope, x, dz, MatMul::TransposeA(true));
} else {
auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
std::vector<Output> grad_outputs;
TF_ASSERT_OK(
AddSymbolicGradients(scope, {z}, {x, y}, {dz}, &grad_outputs));
}
}
CompareTestAndExpectedGraphs();
}
TEST_F(GradientsTest, OneMatMul_InferGradInputs) {
for (const bool expected : {false, true}) {
const Scope& scope = expected ? scope_expected_ : scope_test_;
auto x = Const(scope, {{1.0, 2.0}, {3.0, 4.0}});
auto y = Const(scope, {{1.0, 0.0}, {0.0, 1.0}});
auto z = MatMul(scope, x, y);
TF_ASSERT_OK(scope.status());
CHECK_NOTNULL(z.node());
if (expected) {
auto dz = OnesLike(scope, z);
auto dx = MatMul(scope, dz, y, MatMul::TransposeB(true));
auto dy = MatMul(scope, x, dz, MatMul::TransposeA(true));
} else {
std::vector<Output> grad_outputs;
TF_ASSERT_OK(AddSymbolicGradients(scope, {z}, {x, y}, &grad_outputs));
}
}
CompareTestAndExpectedGraphs();
}
TEST_F(GradientsTest, TwoMatMuls_Chained) {
for (const bool expected : {false, true}) {
const Scope& scope = expected ? scope_expected_ : scope_test_;
auto u = Const(scope, {{1.0, 2.0}, {3.0, 4.0}});
auto v = Const(scope, {{1.0, 0.0}, {0.0, 1.0}});
auto x = MatMul(scope, u, v);
auto y = Const(scope, {{1.0, 0.0}, {0.0, 1.0}});
auto z = MatMul(scope, x, y);
TF_ASSERT_OK(scope.status());
CHECK_NOTNULL(z.node());
if (expected) {
auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
auto dx = MatMul(scope, dz, y, MatMul::TransposeB(true));
auto dy = MatMul(scope, x, dz, MatMul::TransposeA(true));
auto du = MatMul(scope, dx, v, MatMul::TransposeB(true));
auto dv = MatMul(scope, u, dx, MatMul::TransposeA(true));
} else {
auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
std::vector<Output> grad_outputs;
TF_ASSERT_OK(
AddSymbolicGradients(scope, {z}, {u, v}, {dz}, &grad_outputs));
}
}
CompareTestAndExpectedGraphs();
}
TEST_F(GradientsTest, TwoMatMuls_Independent) {
for (const bool expected : {false, true}) {
const Scope& scope = expected ? scope_expected_ : scope_test_;
auto t = Const(scope, {{1.0, 2.0}, {3.0, 4.0}});
auto u = Const(scope, {{1.0, 0.0}, {0.0, 1.0}});
auto v = MatMul(scope, t, u);
TF_ASSERT_OK(scope.status());
CHECK_NOTNULL(v.node());
auto x = Const(scope, {{5.0, 6.0}, {7.0, 8.0}});
auto y = Const(scope, {{1.0, 0.0}, {0.0, 1.0}});
auto z = MatMul(scope, x, y);
TF_ASSERT_OK(scope.status());
CHECK_NOTNULL(z.node());
if (expected) {
auto dv = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
auto dt = MatMul(scope, dv, u, MatMul::TransposeB(true));
auto du = MatMul(scope, t, dv, MatMul::TransposeA(true));
auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
auto dx = MatMul(scope, dz, y, MatMul::TransposeB(true));
auto dy = MatMul(scope, x, dz, MatMul::TransposeA(true));
} else {
auto dv = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
auto dz = Const(scope, {{1.0, 1.0}, {1.0, 1.0}});
std::vector<Output> grad_outputs;
TF_ASSERT_OK(AddSymbolicGradients(scope, {v, z}, {t, u, x, y}, {dv, dz},
&grad_outputs));
}
}
CompareTestAndExpectedGraphs();
}
TEST_F(GradientsTest, StackUnstack_Chained) {
for (const bool expected : {false, true}) {
const Scope& scope = expected ? scope_expected_ : scope_test_;
auto a = Const(scope, 1, {4, 2});
auto b = Const(scope, 2, {4, 2});
auto c = Const(scope, 3, {4, 2});
auto pack = Stack(scope, {a, b, c});
auto unpack = Unstack(scope, pack.output, 3);
TF_ASSERT_OK(scope.status());
auto dx = Const(scope, 4, {4, 2});
auto dy = Const(scope, 5, {4, 2});
auto dz = Const(scope, 6, {4, 2});
if (expected) {
auto unpack_grad = Stack(scope, {dx, dy, dz});
auto pack_grad = Unstack(scope, unpack_grad.output, 3);
} else {
std::vector<Output> grad_outputs;
TF_ASSERT_OK(AddSymbolicGradients(scope, unpack.output, {a, b, c},
{dx, dy, dz}, &grad_outputs));
}
}
CompareTestAndExpectedGraphs();
}
TEST_F(GradientsTest, StackUnstack_StopBackprop) {
for (const bool expected : {false, true}) {
const Scope& scope = expected ? scope_expected_ : scope_test_;
auto a = Const(scope, 1, {4, 2});
auto b = Const(scope, 2, {4, 2});
auto c = Const(scope, 3, {4, 2});
auto pack = Stack(scope, {a, b, c});
auto unpack = Unstack(scope, pack.output, 3);
TF_ASSERT_OK(scope.status());
auto dx = Const(scope, 4, {4, 2});
auto dy = Const(scope, 5, {4, 2});
auto dz = Const(scope, 6, {4, 2});
if (expected) {
auto unpack_grad = Stack(scope, {dx, dy, dz});
} else {
std::vector<Output> grad_outputs;
TF_ASSERT_OK(AddSymbolicGradients(scope, unpack.output, {pack},
{dx, dy, dz}, &grad_outputs));
}
}
CompareTestAndExpectedGraphs();
}
TEST_F(GradientsTest, StackUnstack_SubsetOfUnstackOutputs) {
for (const bool expected : {false, true}) {
const Scope& scope = expected ? scope_expected_ : scope_test_;
auto c = Const(scope, 1, {3, 4, 2});
auto unpack = Unstack(scope, c, 3);
auto x = Identity(scope, unpack.output[0]);
auto y = Identity(scope, unpack.output[1]);
auto z = Identity(scope, unpack.output[2]);
TF_ASSERT_OK(scope.status());
auto dy = Const(scope, 4, {4, 2});
auto dz = Const(scope, 5, {4, 2});
if (expected) {
auto g1 = Identity(scope, dy);
auto g2 = Identity(scope, dz);
} else {
std::vector<Output> grad_outputs;
TF_ASSERT_OK(AddSymbolicGradients(scope, {y, z},
{unpack.output[1], unpack.output[2]},
{dy, dz}, &grad_outputs));
ASSERT_EQ(grad_outputs.size(), 2);
EXPECT_TRUE(grad_outputs[0].node() != nullptr);
EXPECT_TRUE(grad_outputs[1].node() != nullptr);
}
}
CompareTestAndExpectedGraphs();
}
TEST_F(GradientsTest, DependentGradOutputs) {
auto u = Const(scope_test_, {{2}});
auto v = Const(scope_test_, {{3}});
auto x = MatMul(scope_test_, u, v);
auto y = Const(scope_test_, {{4}});
auto z = MatMul(scope_test_, x, y);
TF_ASSERT_OK(scope_test_.status());
CHECK_NOTNULL(z.node());
auto dz = Const(scope_test_, {{5}});
std::vector<Output> grad_outputs;
TF_ASSERT_OK(
AddSymbolicGradients(scope_test_, {z}, {v, x}, {dz}, &grad_outputs));
std::vector<Tensor> outputs;
test::GetTensors(scope_test_, {grad_outputs[0], grad_outputs[1]}, &outputs);
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({40}, {1, 1}));
test::ExpectTensorEqual<int>(outputs[1], test::AsTensor<int>({20}, {1, 1}));
}
TEST_F(GradientsTest, MultipleNodeOutputGrads) {
auto x = Const(scope_test_, 1, {3, 4, 2});
auto unpack = Unstack(scope_test_, x, 3);
auto pack = Stack(scope_test_, unpack.output);
auto dx = Const(scope_test_, {40, 41, 42, 43, 44, 45, 46, 47,
50, 51, 52, 53, 55, 55, 56, 57,
60, 61, 62, 63, 66, 66, 66, 67},
{3, 4, 2});
std::vector<Output> grad_outputs;
TF_ASSERT_OK(AddSymbolicGradients(scope_test_, {pack}, unpack.output, {dx},
&grad_outputs));
std::vector<Tensor> outputs;
test::GetTensors(scope_test_,
{grad_outputs[0], grad_outputs[1], grad_outputs[2]},
&outputs);
test::ExpectTensorEqual<int>(
outputs[0],
test::AsTensor<int>({40, 41, 42, 43, 44, 45, 46, 47}, {4, 2}));
test::ExpectTensorEqual<int>(
outputs[1],
test::AsTensor<int>({50, 51, 52, 53, 55, 55, 56, 57}, {4, 2}));
test::ExpectTensorEqual<int>(
outputs[2],
test::AsTensor<int>({60, 61, 62, 63, 66, 66, 66, 67}, {4, 2}));
}
TEST_F(GradientsTest, UnreachableEdgeGradOneOutput) {
auto x = Variable(scope_test_, {2, 3}, DT_DOUBLE);
auto x_const = Const(scope_test_, {{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}});
auto x_assign = Assign(scope_test_, x, x_const);
auto y = Variable(scope_test_, {3, 1}, DT_DOUBLE);
auto y_const = Const(scope_test_, {{1.0}, {2.0}, {3.0}});
auto y_assign = Assign(scope_test_, y, y_const);
auto m = MatMul(scope_test_, x, y);
auto z = Variable(scope_test_, {1, 3}, DT_DOUBLE);
auto z_const = Const(scope_test_, {{9.0, 10.0, 11.0}});
auto z_assign = Assign(scope_test_, z, z_const);
auto diff_m = Const(scope_test_, {{0.5}, {0.5}});
std::vector<Output> grad_outputs;
TF_ASSERT_OK(
AddSymbolicGradients(scope_test_, {m}, {y}, {diff_m}, &grad_outputs));
std::vector<Tensor> outputs;
test::GetTensors(scope_test_, {x_assign, y_assign, z_assign},
{grad_outputs[0]}, &outputs);
test::ExpectTensorNear<double>(
outputs[0], test::AsTensor<double>({2.5, 3.5, 4.5}, {3, 1}), 1e-5);
}
TEST_F(GradientsTest, UnreachableEdgeGradTwoOutputs) {
auto x = Variable(scope_test_, {2, 3}, DT_DOUBLE);
auto x_const = Const(scope_test_, {{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}});
auto x_assign = Assign(scope_test_, x, x_const);
auto y = Variable(scope_test_, {3, 1}, DT_DOUBLE);
auto y_const = Const(scope_test_, {{1.0}, {2.0}, {3.0}});
auto y_assign = Assign(scope_test_, y, y_const);
auto m1 = MatMul(scope_test_, x, y);
auto z = Variable(scope_test_, {1, 3}, DT_DOUBLE);
auto z_const = Const(scope_test_, {{9.0, 10.0, 11.0}});
auto z_assign = Assign(scope_test_, z, z_const);
auto m2 = MatMul(scope_test_, y, z);
auto dm1 = Const(scope_test_, {{0.5}, {0.5}});
auto dm2 =
Const(scope_test_, {{0.5, 0.5, 0.5}, {0.6, 0.7, 0.8}, {0.6, 0.7, 0.9}});
std::vector<Output> grad_outputs;
TF_ASSERT_OK(AddSymbolicGradients(scope_test_, {m1, m2}, {y}, {dm1, dm2},
&grad_outputs));
std::vector<Tensor> outputs;
test::GetTensors(scope_test_, {x_assign, y_assign, z_assign},
{grad_outputs[0]}, &outputs);
test::ExpectTensorNear<double>(
outputs[0], test::AsTensor<double>({17.5, 24.7, 26.8}, {3, 1}), 1e-5);
}
TEST_F(GradientsTest, UnreachableInput) {
auto x = Const(scope_test_, {{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}});
auto y = Const(scope_test_, {{1.0}, {2.0}, {3.0}});
auto z = Const(scope_test_.WithOpName("z"), {{9.0, 10.0, 11.0}});
auto m1 = MatMul(scope_test_, x, y);
auto m2 = MatMul(scope_test_, y, z);
auto dm1 = Const(scope_test_, {{0.5}, {0.5}});
std::vector<Output> grad_outputs;
Status status =
AddSymbolicGradients(scope_test_, {m1}, {z}, {dm1}, &grad_outputs);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
EXPECT_EQ(status.message(),
"Cannot compute the partial derivative"
" for node 'z' as it's unreachable from the output node(s).");
}
TEST_F(GradientsTest, DependentOutputs) {
auto x = Placeholder(scope_test_, DT_FLOAT);
auto y0 = Square(scope_test_, x);
auto y1 = Square(scope_test_, y0);
auto y2 = Square(scope_test_, y1);
std::vector<Output> grad_outputs;
TF_EXPECT_OK(AddSymbolicGradients(scope_test_, {y0, y2}, {x}, &grad_outputs));
ClientSession session(scope_test_);
std::vector<Tensor> grad_result;
TF_EXPECT_OK(session.Run({{x, {3.0f}}}, grad_outputs, &grad_result));
EXPECT_EQ(grad_result.size(), 1);
EXPECT_EQ(grad_result[0].NumElements(), 1);
EXPECT_EQ(grad_result[0].flat<float>()(0), 17502.0f);
}
TEST_F(GradientsTest, MultiOutputNodeDependentOutputs) {
auto x = Placeholder(scope_test_, DT_FLOAT);
auto y0 = Square(scope_test_, x);
auto y1 = Square(scope_test_, y0);
auto y2 = Square(scope_test_, y0);
auto y3 = Square(scope_test_, y2);
std::vector<Output> grad_outputs;
TF_EXPECT_OK(
AddSymbolicGradients(scope_test_, {y0, y1, y3}, {x}, &grad_outputs));
ClientSession session(scope_test_);
std::vector<Tensor> grad_result;
TF_EXPECT_OK(session.Run({{x, {3.0f}}}, grad_outputs, &grad_result));
EXPECT_EQ(grad_result.size(), 1);
EXPECT_EQ(grad_result[0].NumElements(), 1);
EXPECT_EQ(grad_result[0].flat<float>()(0), 17610.0f);
}
TEST_F(GradientsTest, AddSymbolicGradientsTest) {
Scope scope = Scope::NewRootScope();
for (int cnt = 0; cnt < 100; ++cnt) {
int N = 5 + rand() % 10;
OutputList inputs;
for (int i = 0; i < N; ++i) {
auto a = Const(scope, i, {1});
inputs.push_back(a);
}
auto pack = Stack(scope, inputs);
TF_ASSERT_OK(scope.status());
OutputList output_grads;
Tensor ts(DT_INT32, {N, 1});
auto v = ts.matrix<int32>();
for (int i = 0; i < N; ++i) {
v(i, 0) = i;
}
auto dy = Const(scope, ts);
output_grads.push_back(dy);
std::vector<Output> grad_outputs;
TF_ASSERT_OK(AddSymbolicGradients(scope, {pack.output}, inputs,
output_grads, &grad_outputs));
ClientSession session((scope));
std::vector<Tensor> in_grad;
TF_ASSERT_OK(session.Run(grad_outputs, &in_grad));
for (int i = 0; i < N; ++i) {
test::ExpectTensorEqual<int>(in_grad[i], test::AsTensor<int>({i}, {1}));
}
}
}
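// The fixture below fans a single MatMul output out along three Identity
// edges and selectively wraps each edge in StopGradient; passing an empty
// expected tensor asserts that no gradient flows back to z at all.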
class StopGradientSingleOutputMultiEdgeTest : public ::testing::Test {
protected:
StopGradientSingleOutputMultiEdgeTest() : scope_(Scope::NewRootScope()) {}
void CheckGrad(const std::vector<bool>& stop_outputs,
const Tensor& expected_grad) {
CHECK_EQ(3, stop_outputs.size());
auto x = Const(scope_, {{1, 0}, {0, 1}});
auto y = Const(scope_, {{1, 0}, {0, 1}});
auto z = MatMul(scope_, x, y);
auto out0 = stop_outputs[0]
? StopGradient(scope_, (Identity(scope_, z))).output
: Identity(scope_, z).output;
auto out1 = stop_outputs[1]
? StopGradient(scope_, (Identity(scope_, z))).output
: Identity(scope_, z).output;
auto out2 = stop_outputs[2]
? StopGradient(scope_, (Identity(scope_, z))).output
: Identity(scope_, z).output;
auto g0 = Const(scope_, {{1, 2}, {3, 4}});
auto g1 = Const(scope_, {{5, 6}, {7, 8}});
auto g2 = Const(scope_, {{9, 10}, {11, 12}});
std::vector<Output> grad_outputs;
TF_EXPECT_OK(AddSymbolicGradients(scope_, {out0, out1, out2}, {z},
{g0, g1, g2}, &grad_outputs));
if (expected_grad.NumElements() > 0) {
Tensor output;
test::GetTensor(scope_, grad_outputs[0], &output);
test::ExpectTensorEqual<int>(output, expected_grad);
} else {
EXPECT_EQ(NoGradient(), grad_outputs[0]);
}
}
Scope scope_;
};
TEST_F(StopGradientSingleOutputMultiEdgeTest, ValidGradAllEdges) {
CheckGrad({false, false, false},
test::AsTensor<int>({15, 18, 21, 24}, {2, 2}));
}
TEST_F(StopGradientSingleOutputMultiEdgeTest, StopGradFirstEdge) {
CheckGrad({true, false, false},
test::AsTensor<int>({14, 16, 18, 20}, {2, 2}));
}
TEST_F(StopGradientSingleOutputMultiEdgeTest, StopGradSecondEdge) {
CheckGrad({false, true, false},
test::AsTensor<int>({10, 12, 14, 16}, {2, 2}));
}
TEST_F(StopGradientSingleOutputMultiEdgeTest, StopGradThirdEdge) {
CheckGrad({false, false, true}, test::AsTensor<int>({6, 8, 10, 12}, {2, 2}));
}
TEST_F(StopGradientSingleOutputMultiEdgeTest, StopGradFirstAndSecondEdges) {
CheckGrad({true, true, false}, test::AsTensor<int>({9, 10, 11, 12}, {2, 2}));
}
TEST_F(StopGradientSingleOutputMultiEdgeTest, StopGradSecondAndThirdEdges) {
CheckGrad({false, true, true}, test::AsTensor<int>({1, 2, 3, 4}, {2, 2}));
}
TEST_F(StopGradientSingleOutputMultiEdgeTest, StopGradFirstAndThirdEdges) {
CheckGrad({true, false, true}, test::AsTensor<int>({5, 6, 7, 8}, {2, 2}));
}
TEST_F(StopGradientSingleOutputMultiEdgeTest, StopGradAllEdges) {
CheckGrad({true, true, true}, Tensor());
}
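// Same idea as above, but StopGradient is applied to distinct outputs of a
// multi-output op (Unstack): a stopped output contributes a zero slice to
// the packed gradient, and stopping every output yields NoGradient.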
class StopGradientMultiOutputTest : public ::testing::Test {
protected:
StopGradientMultiOutputTest() : scope_(Scope::NewRootScope()) {}
void CheckGrad(const std::vector<bool>& stop_outputs,
const Tensor& expected_grad) {
CHECK_EQ(3, stop_outputs.size());
auto x = ops::Const(scope_, 1, {3, 2, 4});
auto y = Unstack(scope_, x, 3);
TF_ASSERT_OK(scope_.status());
auto out0 =
stop_outputs[0] ? StopGradient(scope_, y.output[0]) : y.output[0];
auto out1 =
stop_outputs[1] ? StopGradient(scope_, y.output[1]) : y.output[1];
auto out2 =
stop_outputs[2] ? StopGradient(scope_, y.output[2]) : y.output[2];
auto g0 = Const(scope_, {1, 2, 3, 4, 5, 6, 7, 8}, {2, 4});
auto g1 = Const(scope_, {9, 10, 11, 12, 13, 14, 15, 16}, {2, 4});
auto g2 = Const(scope_, {17, 18, 19, 20, 21, 22, 23, 24}, {2, 4});
std::vector<Output> grad_outputs;
TF_EXPECT_OK(AddSymbolicGradients(scope_, {out0, out1, out2}, {x},
{g0, g1, g2}, &grad_outputs));
if (expected_grad.NumElements() > 0) {
Tensor output;
test::GetTensor(scope_, grad_outputs[0], &output);
test::ExpectTensorEqual<int>(output, expected_grad);
} else {
EXPECT_EQ(NoGradient(), grad_outputs[0]);
}
}
Scope scope_;
};
TEST_F(StopGradientMultiOutputTest, ValidGradAllOutputs) {
CheckGrad({false, false, false}, test::AsTensor<int>(
{1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24},
{3, 2, 4}));
}
TEST_F(StopGradientMultiOutputTest, StopGradFirstOutput) {
CheckGrad({true, false, false}, test::AsTensor<int>(
{0, 0, 0, 0, 0, 0, 0, 0,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24},
{3, 2, 4}));
}
TEST_F(StopGradientMultiOutputTest, StopGradSecondOutput) {
CheckGrad({false, true, false}, test::AsTensor<int>(
{1, 2, 3, 4, 5, 6, 7, 8,
0, 0, 0, 0, 0, 0, 0, 0,
17, 18, 19, 20, 21, 22, 23, 24},
{3, 2, 4}));
}
TEST_F(StopGradientMultiOutputTest, StopGradThirdOutput) {
CheckGrad({false, false, true}, test::AsTensor<int>(
{1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
0, 0, 0, 0, 0, 0, 0, 0},
{3, 2, 4}));
}
TEST_F(StopGradientMultiOutputTest, StopGradFirstAndThirdOutputs) {
CheckGrad({true, false, true}, test::AsTensor<int>(
{0, 0, 0, 0, 0, 0, 0, 0,
9, 10, 11, 12, 13, 14, 15, 16,
0, 0, 0, 0, 0, 0, 0, 0},
{3, 2, 4}));
}
TEST_F(StopGradientMultiOutputTest, StopAllOutputs) {
CheckGrad({true, true, true}, Tensor());
}
}
} |
1,268 | cpp | tensorflow/tensorflow | dlpack | third_party/xla/xla/python/dlpack.cc | tensorflow/c/eager/dlpack_test.cc | #ifndef XLA_PYTHON_DLPACK_H_
#define XLA_PYTHON_DLPACK_H_
#include <cstdint>
#include <optional>
#include "absl/status/statusor.h"
#include "third_party/nanobind/include/nanobind/nanobind.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/nb_class_ptr.h"
#include "xla/python/py_client.h"
namespace xla {
absl::StatusOr<nanobind::capsule> BufferToDLPackManagedTensor(
nanobind::handle buffer, std::optional<std::intptr_t> stream);
absl::StatusOr<nanobind::object> DLPackManagedTensorToBuffer(
const nanobind::capsule& tensor,
std::optional<nb_class_ptr<PyClient>> cpu_client,
std::optional<nb_class_ptr<PyClient>> gpu_client);
absl::StatusOr<nanobind::object> DLPackManagedTensorToBuffer(
const nanobind::capsule& tensor, ifrt::Device* device,
nb_class_ptr<PyClient> client, std::optional<std::intptr_t> stream);
}
#endif
#include "xla/python/dlpack.h"
#include <Python.h>
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <optional>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "include/dlpack/dlpack.h"
#include "llvm/Support/Casting.h"
#include "third_party/nanobind/include/nanobind/nanobind.h"
#include "xla/layout.h"
#include "xla/pjrt/exceptions.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/nb_class_ptr.h"
#include "xla/python/pjrt_ifrt/pjrt_array.h"
#include "xla/python/pjrt_ifrt/pjrt_client.h"
#include "xla/python/pjrt_ifrt/pjrt_device.h"
#include "xla/python/py_array.h"
#include "xla/python/py_client.h"
#include "xla/python/python_ref_manager.h"
#include "xla/python/traceback.h"
#include "xla/python/types.h"
#include "xla/python/util.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace nb = nanobind;
namespace xla {
namespace {
const char* const kDlTensorCapsuleName = "dltensor";
struct DLPackTensor {
~DLPackTensor();
nb::object buffer_reference;
std::unique_ptr<PjRtBuffer::ExternalReference> external_reference;
std::vector<int64_t> shape;
std::vector<int64_t> strides;
DLManagedTensor tensor;
};
DLPackTensor::~DLPackTensor() {
if (buffer_reference) {
GlobalPyRefManager()->AddGarbage(
absl::MakeSpan(&buffer_reference, 1));
}
}
void DLPackTensorDeleter(DLManagedTensor* t) {
if (t) {
delete static_cast<DLPackTensor*>(t->manager_ctx);
}
}
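// Two-way mapping between XLA primitive types and DLPack's
// {code, bits, lanes} dtype triple. Only lanes == 1 is supported; vectorized
// DLPack dtypes are rejected on import.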
absl::StatusOr<DLDataType> PrimitiveTypeToDLDataType(PrimitiveType type) {
switch (type) {
case S8:
return DLDataType{kDLInt, 8, 1};
case S16:
return DLDataType{kDLInt, 16, 1};
case S32:
return DLDataType{kDLInt, 32, 1};
case S64:
return DLDataType{kDLInt, 64, 1};
case U8:
return DLDataType{kDLUInt, 8, 1};
case U16:
return DLDataType{kDLUInt, 16, 1};
case U32:
return DLDataType{kDLUInt, 32, 1};
case U64:
return DLDataType{kDLUInt, 64, 1};
case F16:
return DLDataType{kDLFloat, 16, 1};
case F32:
return DLDataType{kDLFloat, 32, 1};
case F64:
return DLDataType{kDLFloat, 64, 1};
case BF16:
return DLDataType{kDLBfloat, 16, 1};
case PRED:
return DLDataType{kDLBool, 8, 1};
case C64:
return DLDataType{kDLComplex, 64, 1};
case C128:
return DLDataType{kDLComplex, 128, 1};
default:
return Unimplemented("XLA type %s has no DLPack equivalent",
PrimitiveType_Name(type));
}
}
absl::StatusOr<PrimitiveType> DLDataTypeToPrimitiveType(DLDataType type) {
if (type.lanes != 1) {
return Unimplemented("DLPack types with lanes != 1 not implemented, got %d",
type.lanes);
}
switch (type.code) {
case kDLBool:
switch (type.bits) {
case 8:
return PRED;
default:
return Unimplemented(
"Only 8-bit DLPack booleans are supported, got %d bits",
type.bits);
}
case kDLInt:
switch (type.bits) {
case 8:
return S8;
case 16:
return S16;
case 32:
return S32;
case 64:
return S64;
default:
return Unimplemented(
"Invalid or unsupported DLPack integer width: %d bits",
type.bits);
}
case kDLUInt:
switch (type.bits) {
case 8:
return U8;
case 16:
return U16;
case 32:
return U32;
case 64:
return U64;
default:
return Unimplemented(
"Invalid or unsupported DLPack unsigned integer width: %d bits",
type.bits);
}
case kDLFloat:
switch (type.bits) {
case 16:
return F16;
case 32:
return F32;
case 64:
return F64;
default:
return Unimplemented(
"Invalid or unsupported DLPack float width: %d bits", type.bits);
}
case kDLBfloat:
switch (type.bits) {
case 16:
return BF16;
default:
return Unimplemented(
"Invalid or unsupported DLPack Bfloat width: %d bits", type.bits);
}
case kDLComplex:
switch (type.bits) {
case 64:
return C64;
case 128:
return C128;
default:
return Unimplemented(
"Invalid or unsupported DLPack complex width: %d bits",
type.bits);
}
default:
return Unimplemented("Unknown or invalid DLPack type code %d", type.code);
}
}
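// Recovers an XLA minor-to-major layout from DLPack strides: dimensions are
// sorted by ascending stride (equal strides order the higher dimension index
// first), then the strides are verified to describe a compact permutation of
// the underlying buffer; broadcast-style strides on dims > 1 are rejected.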
absl::StatusOr<std::vector<int64_t>> StridesToLayout(
absl::Span<int64_t const> dims, absl::Span<int64_t const> strides) {
CHECK_EQ(dims.size(), strides.size());
std::vector<int64_t> minor_to_major(dims.size());
std::iota(minor_to_major.begin(), minor_to_major.end(), 0);
absl::c_sort(minor_to_major, [&](int a, int b) {
if (strides[a] < strides[b]) {
return true;
}
if (strides[a] > strides[b]) {
return false;
}
return b < a;
});
int64_t stride = 1;
for (int64_t d : minor_to_major) {
if (dims[d] > 1 && strides[d] != stride) {
return Unimplemented(
"Only DLPack tensors with trivial (compact) striding are supported; "
"i.e., tensors whose striding represents a transposition of the "
"underlying buffer but not broadcasting. Dimensions were: [%s], "
"strides were [%s].",
absl::StrJoin(dims, ","), absl::StrJoin(strides, ","));
}
stride *= dims[d];
}
return minor_to_major;
}
absl::StatusOr<DLDeviceType> DLDeviceTypeForDevice(const PjRtDevice& device) {
if (device.client()->platform_id() == CpuId()) {
return kDLCPU;
} else if (device.client()->platform_id() == CudaId()) {
return kDLCUDA;
} else if (device.client()->platform_id() == RocmId()) {
return kDLROCM;
}
return InvalidArgument("Device %s cannot be used as a DLPack device.",
device.DebugString());
}
absl::StatusOr<DLDevice> DLDeviceForDevice(const PjRtDevice& device) {
DLDevice context;
TF_ASSIGN_OR_RETURN(context.device_type, DLDeviceTypeForDevice(device));
context.device_id = device.local_hardware_id();
return context;
}
absl::StatusOr<PjRtDevice*> DeviceForDLDevice(const PjRtClient* cpu_client,
const PjRtClient* gpu_client,
const DLDevice& context) {
switch (context.device_type) {
case kDLCPU:
if (cpu_client == nullptr) {
return InvalidArgument(
"DLPack tensor is on CPU, but no CPU backend was provided.");
}
TF_RET_CHECK(cpu_client->platform_id() == CpuId());
return cpu_client->LookupAddressableDevice(
xla::PjRtLocalDeviceId(context.device_id));
case kDLCUDA:
if (gpu_client == nullptr) {
return InvalidArgument(
"DLPack tensor is on GPU, but no GPU backend was provided.");
}
TF_RET_CHECK(gpu_client->platform_id() == CudaId());
return gpu_client->LookupAddressableDevice(
xla::PjRtLocalDeviceId(context.device_id));
case kDLROCM:
if (gpu_client == nullptr) {
return InvalidArgument(
"DLPack tensor is on GPU, but no GPU backend was provided.");
}
TF_RET_CHECK(gpu_client->platform_id() == RocmId());
return gpu_client->LookupAddressableDevice(
xla::PjRtLocalDeviceId(context.device_id));
default:
return InvalidArgument("Unknown/unsupported DLPack device type %d",
context.device_type);
}
}
}
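// Export path: pins the underlying PjRt buffer with an external reference,
// synchronizes (on the given stream if provided, otherwise by awaiting
// buffer readiness), and hands ownership to a "dltensor" capsule whose
// destructor releases both the reference and the borrowed Python buffer.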
absl::StatusOr<nb::capsule> BufferToDLPackManagedTensor(
nb::handle py_buffer, std::optional<std::intptr_t> stream) {
ifrt::Array* ifrt_array = nb::cast<xla::PyArray>(py_buffer).ifrt_array();
if (ifrt_array == nullptr) {
return Unimplemented(
"BufferToDLPackManagedTensor called on deleted array.");
}
auto* arr = llvm::dyn_cast_or_null<ifrt::PjRtCompatibleArray>(ifrt_array);
if (arr == nullptr) {
throw XlaRuntimeError(
"This operation is implemented for a PjRt-compatible backend only.");
}
PjRtBuffer* pjrt_buffer = arr->pjrt_buffers().front().get();
if (pjrt_buffer->IsTuple()) {
return Unimplemented(
"BufferToDLPackManagedTensor is not implemented for tuple "
"buffers.");
}
if (pjrt_buffer->has_dynamic_dimensions()) {
return Unimplemented("DynamicShape is not implemented in DLPack.");
}
auto pack = std::make_unique<DLPackTensor>();
DLTensor& dt = pack->tensor.dl_tensor;
{
GlobalPyRefManager()->CollectGarbage();
nb::gil_scoped_release gil_release;
TF_ASSIGN_OR_RETURN(pack->external_reference,
pjrt_buffer->AcquireExternalReference());
if (stream) {
TF_RETURN_IF_ERROR(
pack->external_reference->WaitUntilBufferReadyOnStream(*stream));
} else {
TF_RETURN_IF_ERROR(
AwaitBuffersReady(absl::MakeConstSpan(&ifrt_array, 1)));
}
}
pack->buffer_reference = nb::borrow<nb::object>(py_buffer);
dt.data = pack->external_reference->OpaqueDeviceMemoryDataPointer();
pack->tensor.manager_ctx = pack.get();
pack->tensor.deleter = DLPackTensorDeleter;
TF_ASSIGN_OR_RETURN(dt.device, DLDeviceForDevice(*pjrt_buffer->device()));
dt.device.device_id = pjrt_buffer->device()->local_hardware_id();
dt.ndim = pjrt_buffer->dimensions().size();
TF_ASSIGN_OR_RETURN(dt.dtype,
PrimitiveTypeToDLDataType(pjrt_buffer->element_type()));
pack->shape = std::vector<int64_t>(pjrt_buffer->dimensions().begin(),
pjrt_buffer->dimensions().end());
Layout xla_layout = GetXlaLayoutUnsafe(pjrt_buffer->layout());
pack->strides = StridesForShape(pjrt_buffer->element_type(),
pjrt_buffer->dimensions(), xla_layout);
dt.shape = reinterpret_cast<std::int64_t*>(pack->shape.data());
dt.strides = reinterpret_cast<std::int64_t*>(pack->strides.data());
dt.byte_offset = 0;
nb::capsule capsule = nb::steal<nb::capsule>(
PyCapsule_New(&pack.release()->tensor, kDlTensorCapsuleName,
[](PyObject* obj) noexcept {
DLManagedTensor* dlmt = static_cast<DLManagedTensor*>(
PyCapsule_GetPointer(obj, kDlTensorCapsuleName));
if (dlmt) {
DLPackTensorDeleter(dlmt);
} else {
PyErr_Clear();
}
}));
if (!capsule.ptr()) {
throw nb::python_error();
}
return capsule;
}
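// Import path (CPU/GPU client pair): validates the capsule, maps the DLPack
// device and dtype back to their PjRt/XLA equivalents, rebuilds the layout
// from the strides (requiring the client's default layout), and wraps the
// memory zero-copy via CreateViewOfDeviceBuffer. The capsule is renamed to
// "used_dltensor" so it cannot be consumed a second time.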
absl::StatusOr<nb::object> DLPackManagedTensorToBuffer(
const nb::capsule& tensor, std::optional<nb_class_ptr<PyClient>> cpu_client,
std::optional<nb_class_ptr<PyClient>> gpu_client) {
auto* cpu_pjrt_client = cpu_client ? (*cpu_client)->pjrt_client() : nullptr;
auto* gpu_pjrt_client = gpu_client ? (*gpu_client)->pjrt_client() : nullptr;
if (std::string_view(tensor.name()) != kDlTensorCapsuleName) {
return InvalidArgument(
"DLPack tensor must be a capsule with name \"dltensor\", got \"%s\". "
"Note that a DLPack tensor may be consumed at most once.",
std::string_view(tensor.name()));
}
DLManagedTensor* dlmt = static_cast<DLManagedTensor*>(tensor.data());
if (dlmt->dl_tensor.ndim < 0) {
return InvalidArgument(
"Number of dimensions in DLManagedTensor must be nonnegative, got %d",
dlmt->dl_tensor.ndim);
}
TF_ASSIGN_OR_RETURN(PjRtDevice * device,
DeviceForDLDevice(cpu_client ? cpu_pjrt_client : nullptr,
gpu_client ? gpu_pjrt_client : nullptr,
dlmt->dl_tensor.device));
absl::Span<int64_t const> dimensions(
reinterpret_cast<int64_t*>(dlmt->dl_tensor.shape), dlmt->dl_tensor.ndim);
TF_ASSIGN_OR_RETURN(PrimitiveType element_type,
DLDataTypeToPrimitiveType(dlmt->dl_tensor.dtype));
std::vector<int64_t> minor_to_major;
if (dlmt->dl_tensor.strides &&
absl::c_find(dimensions, 0) == dimensions.end()) {
absl::Span<int64_t const> strides(
reinterpret_cast<int64_t*>(dlmt->dl_tensor.strides),
dlmt->dl_tensor.ndim);
TF_ASSIGN_OR_RETURN(minor_to_major, StridesToLayout(dimensions, strides));
} else {
minor_to_major.resize(dlmt->dl_tensor.ndim);
std::iota(minor_to_major.rbegin(), minor_to_major.rend(), 0);
}
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(element_type, dimensions,
minor_to_major);
TF_ASSIGN_OR_RETURN(Layout default_layout, device->client()->GetDefaultLayout(
element_type, dimensions));
if (shape.layout() != default_layout) {
return Unimplemented(
"from_dlpack got array with non-default layout with minor-to-major "
"dimensions (%s), expected (%s)",
absl::StrJoin(shape.layout().minor_to_major(), ","),
absl::StrJoin(default_layout.minor_to_major(), ","));
}
std::function<void()> on_delete_callback;
if (dlmt->deleter) {
on_delete_callback = [dlmt]() { dlmt->deleter(dlmt); };
}
TF_ASSIGN_OR_RETURN(auto pjrt_buffer,
device->client()->CreateViewOfDeviceBuffer(
static_cast<char*>(dlmt->dl_tensor.data) +
dlmt->dl_tensor.byte_offset,
shape, device, on_delete_callback));
PyCapsule_SetName(tensor.ptr(), "used_dltensor");
PyCapsule_SetDestructor(tensor.ptr(), nullptr);
auto client = (cpu_client && device->client() == cpu_pjrt_client)
? std::move(*cpu_client)
: std::move(*gpu_client);
auto* ifrt_client =
llvm::dyn_cast_or_null<ifrt::PjRtCompatibleClient>(client->ifrt_client());
if (ifrt_client == nullptr) {
throw XlaRuntimeError(
"This operation is implemented for a PjRt-compatible backend only.");
}
TF_ASSIGN_OR_RETURN(auto ifrt_array,
ifrt_client->CreatePjRtArray(std::move(pjrt_buffer)));
return PyArray::MakeFromSingleDeviceArray(std::move(client), Traceback::Get(),
std::move(ifrt_array), false, true);
}
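// Import path (explicit device): like the overload above, but targets a
// specific addressable IFRT device and threads an optional stream through
// CreateViewOfDeviceBuffer; note that this overload skips the default-layout
// check.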
absl::StatusOr<nb::object> DLPackManagedTensorToBuffer(
const nb::capsule& tensor, ifrt::Device* ifrt_device,
nb_class_ptr<PyClient> client, std::optional<std::intptr_t> stream) {
ifrt::PjRtDevice* device =
llvm::dyn_cast_or_null<ifrt::PjRtDevice>(ifrt_device);
if (device == nullptr) {
throw XlaRuntimeError(
"DLPack is supported for PjRt-compatible backends only.");
}
if (!device->IsAddressable()) {
throw XlaRuntimeError(
"DLPack is only supported for devices addressable by the current "
"process.");
}
if (std::string_view(tensor.name()) != kDlTensorCapsuleName) {
return InvalidArgument(
"DLPack tensor must be a capsule with name \"dltensor\", got \"%s\". "
"Note that a DLPack tensor may be consumed at most once.",
std::string_view(tensor.name()));
}
DLManagedTensor* dlmt = static_cast<DLManagedTensor*>(tensor.data());
if (dlmt->dl_tensor.ndim < 0) {
return InvalidArgument(
"Number of dimensions in DLManagedTensor must be nonnegative, got %d",
dlmt->dl_tensor.ndim);
}
absl::Span<int64_t const> dimensions(
reinterpret_cast<int64_t*>(dlmt->dl_tensor.shape), dlmt->dl_tensor.ndim);
TF_ASSIGN_OR_RETURN(PrimitiveType element_type,
DLDataTypeToPrimitiveType(dlmt->dl_tensor.dtype));
std::vector<int64_t> minor_to_major;
if (dlmt->dl_tensor.strides &&
absl::c_find(dimensions, 0) == dimensions.end()) {
absl::Span<int64_t const> strides(
reinterpret_cast<int64_t*>(dlmt->dl_tensor.strides),
dlmt->dl_tensor.ndim);
TF_ASSIGN_OR_RETURN(minor_to_major, StridesToLayout(dimensions, strides));
} else {
minor_to_major.resize(dlmt->dl_tensor.ndim);
std::iota(minor_to_major.rbegin(), minor_to_major.rend(), 0);
}
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(element_type, dimensions,
minor_to_major);
std::function<void()> on_delete_callback;
if (dlmt->deleter) {
on_delete_callback = [dlmt]() { dlmt->deleter(dlmt); };
}
TF_ASSIGN_OR_RETURN(
auto pjrt_buffer,
device->pjrt_device()->client()->CreateViewOfDeviceBuffer(
static_cast<char*>(dlmt->dl_tensor.data) +
dlmt->dl_tensor.byte_offset,
shape, device->pjrt_device(), on_delete_callback, stream));
PyCapsule_SetName(tensor.ptr(), "used_dltensor");
PyCapsule_SetDestructor(tensor.ptr(), nullptr);
auto* ifrt_client =
llvm::dyn_cast_or_null<ifrt::PjRtCompatibleClient>(client->ifrt_client());
if (ifrt_client == nullptr) {
throw XlaRuntimeError(
"This operation is implemented for a PjRt-compatible backend only.");
}
TF_ASSIGN_OR_RETURN(auto ifrt_array,
ifrt_client->CreatePjRtArray(std::move(pjrt_buffer)));
return PyArray::MakeFromSingleDeviceArray(std::move(client), Traceback::Get(),
std::move(ifrt_array), false, true);
}
} | #include "tensorflow/c/eager/dlpack.h"
#include <vector>
#include "absl/strings/str_join.h"
#include "include/dlpack/dlpack.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
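// Round-trips a CPU float tensor through TFE_HandleFromDLPack and
// TFE_HandleToDLPack, then checks that device, dtype, shape, and element
// values survive the conversion and that any returned strides are compact.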
void TestHandleFromDLPack(TF_Status* status, TFE_Context* ctx,
std::vector<int64_t> shape,
std::vector<int64_t> strides) {
size_t num_elements = 1;
for (int i = 0; i < static_cast<int32_t>(shape.size()); ++i) {
num_elements *= shape[i];
}
std::vector<float> data(num_elements);
for (size_t j = 0; j < num_elements; ++j) {
data[j] = j;
}
DLManagedTensor dlm_in = {};
DLTensor* dltensor_in = &dlm_in.dl_tensor;
dltensor_in->data = data.data();
dltensor_in->device = {kDLCPU, 0};
dltensor_in->ndim = static_cast<int32_t>(shape.size());
dltensor_in->dtype = {kDLFloat, 32, 1};
dltensor_in->shape = shape.data();
dltensor_in->strides = strides.data();
TFE_TensorHandle* handle = TFE_HandleFromDLPack(&dlm_in, status, ctx);
ASSERT_NE(handle, nullptr)
<< TF_Message(status) << " (shape=[" << absl::StrJoin(shape, ",")
<< "], strides=[" << absl::StrJoin(strides, ",") << "])";
auto* dlm_out =
static_cast<DLManagedTensor*>(TFE_HandleToDLPack(handle, status));
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
const DLTensor* dltensor_out = &dlm_out->dl_tensor;
EXPECT_EQ(dltensor_out->device.device_type, dltensor_in->device.device_type);
EXPECT_EQ(dltensor_out->device.device_id, dltensor_in->device.device_id);
EXPECT_EQ(dltensor_out->ndim, dltensor_in->ndim);
EXPECT_EQ(dltensor_out->dtype.code, dltensor_in->dtype.code);
EXPECT_EQ(dltensor_out->dtype.bits, dltensor_in->dtype.bits);
EXPECT_EQ(dltensor_out->dtype.lanes, dltensor_in->dtype.lanes);
for (int i = 0; i < dltensor_in->ndim; ++i) {
EXPECT_EQ(dltensor_out->shape[i], dltensor_in->shape[i]);
if (dltensor_out->strides) {
if (i == dltensor_in->ndim - 1) {
EXPECT_EQ(dltensor_out->strides[i], 1);
} else {
EXPECT_EQ(dltensor_out->strides[i],
dltensor_out->shape[i + 1] * dltensor_out->strides[i + 1]);
}
}
}
const float* data_in = static_cast<const float*>(dltensor_in->data);
const float* data_out = static_cast<const float*>(dltensor_out->data);
for (size_t j = 0; j < num_elements; ++j) {
EXPECT_EQ(data_out[j], data_in[j]);
}
TFE_CallDLManagedTensorDeleter(dlm_out);
TFE_DeleteTensorHandle(handle);
}
TEST(DLPack, HandleFromDLPackStrides) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TestHandleFromDLPack(status, ctx, {}, {});
TestHandleFromDLPack(status, ctx, {4}, {});
TestHandleFromDLPack(status, ctx, {4}, {1});
TestHandleFromDLPack(status, ctx, {4, 3, 2}, {});
TestHandleFromDLPack(status, ctx, {4, 3, 2}, {6, 2, 1});
TestHandleFromDLPack(status, ctx, {1}, {1});
TestHandleFromDLPack(status, ctx, {1}, {0});
TestHandleFromDLPack(status, ctx, {4, 1, 2}, {2, 1, 1});
TestHandleFromDLPack(status, ctx, {4, 1, 2}, {2, 0, 1});
TestHandleFromDLPack(status, ctx, {4, 3, 1}, {3, 1, 1});
TestHandleFromDLPack(status, ctx, {4, 3, 1}, {3, 1, 0});
TestHandleFromDLPack(status, ctx, {4, 0, 2}, {0, 2, 1});
TestHandleFromDLPack(status, ctx, {4, 0, 2}, {0, 1, 1});
TestHandleFromDLPack(status, ctx, {4, 0, 2}, {0, 0, 1});
TestHandleFromDLPack(status, ctx, {4, 0, 2}, {0, 2, 0});
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
}
} |
1,269 | cpp | tensorflow/tensorflow | c_api_experimental_reader | tensorflow/c/eager/c_api_experimental_reader.cc | tensorflow/c/eager/c_api_experimental_reader_test.cc | #ifndef TENSORFLOW_C_EAGER_C_API_EXPERIMENTAL_READER_H_
#define TENSORFLOW_C_EAGER_C_API_EXPERIMENTAL_READER_H_
#include "tensorflow/c/eager/c_api.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct TFE_MonitoringCounterReader TFE_MonitoringCounterReader;
TF_CAPI_EXPORT extern TFE_MonitoringCounterReader*
TFE_MonitoringNewCounterReader(const char* name);
TF_CAPI_EXPORT extern int64_t TFE_MonitoringReadCounter0(
TFE_MonitoringCounterReader*);
TF_CAPI_EXPORT extern int64_t TFE_MonitoringReadCounter1(
TFE_MonitoringCounterReader*, const char* label_value);
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/c/eager/c_api_experimental_reader.h"
#include "tensorflow/c/eager/tfe_monitoring_reader_internal.h"
template <typename... LabelType>
int64_t TFE_MonitoringCounterReader::Read(const LabelType&... labels) {
return counter->Read(labels...);
}
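// Defining this member template in the .cc file is safe only because its
// sole instantiations (the zero-label and one-label reads below) live in
// this same translation unit.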
TFE_MonitoringCounterReader* TFE_MonitoringNewCounterReader(const char* name) {
auto* result = new TFE_MonitoringCounterReader(name);
return result;
}
int64_t TFE_MonitoringReadCounter0(TFE_MonitoringCounterReader* cell_reader) {
int64_t result = cell_reader->Read();
return result;
}
int64_t TFE_MonitoringReadCounter1(TFE_MonitoringCounterReader* cell_reader,
const char* label) {
int64_t result = cell_reader->Read(label);
return result;
} | #include "tensorflow/c/eager/c_api_experimental_reader.h"
#include <cstdint>
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TFE_MonitoringCounter0* CreateCounter0(const char* counter_name);
TFE_MonitoringCounter1* CreateCounter1(const char* counter_name,
const char* label);
void IncrementCounter0(TFE_MonitoringCounter0* counter, int64_t delta = 1);
void IncrementCounter1(TFE_MonitoringCounter1* counter, const char* label,
int64_t delta = 1);
TEST(CAPI, MonitoringCellReader0) {
auto counter_name = "test/counter0";
auto* counter = CreateCounter0(counter_name);
auto* reader = TFE_MonitoringNewCounterReader(counter_name);
IncrementCounter0(counter);
int64_t actual = TFE_MonitoringReadCounter0(reader);
CHECK_EQ(actual, 1);
}
TEST(CAPI, MonitoringCellReader1) {
auto counter_name = "test/counter1";
auto label_name = "test/label";
auto* counter = CreateCounter1(counter_name, label_name);
auto* reader = TFE_MonitoringNewCounterReader(counter_name);
IncrementCounter1(counter, label_name);
int64_t actual = TFE_MonitoringReadCounter1(reader, label_name);
CHECK_EQ(actual, 1);
}
TFE_MonitoringCounter0* CreateCounter0(const char* counter_name) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter0(counter_name, status, "description");
TF_DeleteStatus(status);
return counter;
}
void IncrementCounter0(TFE_MonitoringCounter0* counter, int64_t delta) {
auto* cell = TFE_MonitoringGetCellCounter0(counter);
TFE_MonitoringCounterCellIncrementBy(cell, delta);
}
TFE_MonitoringCounter1* CreateCounter1(const char* counter_name,
const char* label) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter1(counter_name, status, "description", label);
TF_DeleteStatus(status);
return counter;
}
void IncrementCounter1(TFE_MonitoringCounter1* counter, const char* label,
int64_t delta) {
auto* cell = TFE_MonitoringGetCellCounter1(counter, label);
TFE_MonitoringCounterCellIncrementBy(cell, delta);
}
}
} |
1,270 | cpp | tensorflow/tensorflow | c_api_unified_experimental | tensorflow/c/eager/c_api_unified_experimental.cc | tensorflow/c/eager/c_api_unified_experimental_test.cc | #ifndef TENSORFLOW_C_EAGER_C_API_UNIFIED_EXPERIMENTAL_H_
#define TENSORFLOW_C_EAGER_C_API_UNIFIED_EXPERIMENTAL_H_
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct TF_ExecutionContext TF_ExecutionContext;
typedef struct TF_AbstractTensor TF_AbstractTensor;
typedef struct TF_AbstractOp TF_AbstractOp;
typedef struct TF_AbstractFunction TF_AbstractFunction;
void TF_SetTracingImplementation(const char* name, TF_Status*);
TF_ExecutionContext* TF_CreateFunction(const char* fn_name, TF_Status* status);
TF_ExecutionContext* TF_NewEagerExecutionContext(TFE_ContextOptions*,
TF_Status* s);
void TF_DeleteExecutionContext(TF_ExecutionContext*);
typedef struct TF_Shape {
int num_dims;
int64_t* dim_sizes;
} TF_Shape;
TF_AbstractTensor* TF_AddFunctionParameter(TF_ExecutionContext* func,
TF_DataType dtype, TF_Shape shape,
TF_Status* s);
TF_AbstractOp* TF_NewAbstractOp(TF_ExecutionContext* ctx);
void TF_DeleteAbstractOp(TF_AbstractOp*);
void TF_AbstractOpSetOpType(TF_AbstractOp* op, const char* const op_type,
TF_Status* s);
void TF_AbstractOpSetOpName(TF_AbstractOp* op, const char* const op_name,
TF_Status* s);
void TF_AbstractOpSetAttrType(TF_AbstractOp* op, const char* const attr_name,
TF_DataType value, TF_Status* s);
void TF_DeleteAbstractTensor(TF_AbstractTensor*);
typedef struct TF_OutputList TF_OutputList;
TF_OutputList* TF_NewOutputList();
void TF_DeleteOutputList(TF_OutputList* o);
void TF_OutputListSetNumOutputs(TF_OutputList* o, int num_outputs, TF_Status*);
int TF_OutputListNumOutputs(TF_OutputList* o);
TF_AbstractTensor* TF_OutputListGet(TF_OutputList* o, int i);
void TF_OutputListPushBack(TF_OutputList* o, TF_AbstractTensor* tensor,
TF_Status*);
void TF_ExecuteOperation(TF_AbstractOp* op, int num_inputs,
TF_AbstractTensor* const* inputs, TF_OutputList* o,
TF_Status* s);
TF_AbstractFunction* TF_FinalizeFunction(TF_ExecutionContext* ctx,
TF_OutputList*, TF_Status*);
void TF_DeleteAbstractFunction(TF_AbstractFunction*);
void TF_ExecutionContextRegisterFunction(TF_ExecutionContext*,
TF_AbstractFunction*, TF_Status*);
TF_AbstractTensor* TF_CreateAbstractTensorFromEagerTensor(TFE_TensorHandle* t,
TF_Status* s);
TFE_TensorHandle* TF_AbstractTensorGetEagerTensor(TF_AbstractTensor* at,
TF_Status* s);
TFE_Context* TF_ExecutionContextGetTFEContext(TF_ExecutionContext*,
TF_Status* s);
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/c/eager/c_api_unified_experimental.h"
#include <cassert>
#include <set>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
using tensorflow::string;
namespace tensorflow {
namespace tracing {
typedef absl::flat_hash_map<std::string, tracing::FactoryFunction> FactoriesMap;
static FactoriesMap& GetFactories() {
static FactoriesMap* factories = new FactoriesMap;
return *factories;
}
static tracing::FactoryFunction default_factory;
void RegisterTracingEngineFactory(const string& name, FactoryFunction factory) {
  // Parenthesized so the assertion string documents the whole disjunction
  // instead of binding (via &&) to the second operand alone.
  assert(((!GetFactories().count(name)) ||
          (GetFactories()[name] == factory)) &&
         "Duplicate tracing factory registration");
GetFactories()[name] = factory;
}
Status SetDefaultTracingEngine(const char* name) {
auto entry = GetFactories().find(name);
if (entry != GetFactories().end()) {
    // Reuse the iterator from the lookup above instead of searching again.
    default_factory = entry->second;
return absl::OkStatus();
}
string msg = absl::StrCat(
"No tracing engine factory has been registered with the key '", name,
"' (available: ");
std::set<string> factories_sorted;
for (const auto& factory : GetFactories())
factories_sorted.insert(factory.first);
const char* comma = "";
for (const string& factory : factories_sorted) {
msg += comma + factory;
comma = ", ";
}
msg += ")";
return errors::InvalidArgument(msg.c_str());
}
static TracingContext* CreateTracingExecutionContext(const char* fn_name,
TF_Status* s) {
if (default_factory) {
return default_factory(fn_name, s);
}
tsl::Set_TF_Status_from_Status(
s, errors::FailedPrecondition("default_factory is nullptr"));
return nullptr;
}
}
}
using tensorflow::AbstractFunction;
using tensorflow::AbstractTensorHandle;
using tensorflow::DataType;
using tensorflow::dyn_cast;
using tensorflow::OutputList;
using tensorflow::Status;
using tensorflow::unwrap;
using tensorflow::wrap;
using tensorflow::tracing::CreateTracingExecutionContext;
using tensorflow::tracing::SetDefaultTracingEngine;
using tensorflow::tracing::TracingContext;
using tensorflow::tracing::TracingOperation;
using tensorflow::tracing::TracingTensorHandle;
void TF_SetTracingImplementation(const char* name, TF_Status* s) {
tsl::Set_TF_Status_from_Status(s, SetDefaultTracingEngine(name));
}
TF_ExecutionContext* TF_CreateFunction(const char* fn_name, TF_Status* s) {
return wrap(CreateTracingExecutionContext(fn_name, s));
}
TF_AbstractFunction* TF_FinalizeFunction(TF_ExecutionContext* ctx,
TF_OutputList* outputs, TF_Status* s) {
AbstractFunction* func;
TracingContext* tracing_ctx = dyn_cast<TracingContext>(unwrap(ctx));
if (!tracing_ctx) {
tsl::Set_TF_Status_from_Status(
s, tensorflow::errors::InvalidArgument(
"Only TracingContext can be converted into a function."));
return nullptr;
}
tsl::Set_TF_Status_from_Status(s,
tracing_ctx->Finalize(unwrap(outputs), &func));
TF_DeleteExecutionContext(ctx);
return wrap(func);
}
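// A num_dims of -1 denotes a parameter of unknown rank; otherwise dim_sizes
// must point at num_dims entries, where individual entries may still be -1
// to mark unknown dimensions.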
TF_AbstractTensor* TF_AddFunctionParameter(TF_ExecutionContext* func,
TF_DataType dtype, TF_Shape shape,
TF_Status* s) {
DCHECK_GE(shape.num_dims, -1);
TracingTensorHandle* t;
TracingContext* tracing_ctx = dyn_cast<TracingContext>(unwrap(func));
if (!tracing_ctx) {
tsl::Set_TF_Status_from_Status(
s, tensorflow::errors::InvalidArgument(
"TF_AddFunctionParameter must be called on a TracingContext."));
return nullptr;
}
tensorflow::PartialTensorShape partial_shape;
if (shape.num_dims != -1) {
DCHECK(shape.dim_sizes != nullptr);
Status status = tensorflow::PartialTensorShape::MakePartialShape(
reinterpret_cast<int64_t*>(shape.dim_sizes), shape.num_dims,
&partial_shape);
if (!status.ok()) {
tsl::Set_TF_Status_from_Status(s, status);
return nullptr;
}
}
tsl::Set_TF_Status_from_Status(
s, tracing_ctx->AddParameter(static_cast<DataType>(dtype), partial_shape,
&t));
return wrap(t);
}
void TF_DeleteExecutionContext(TF_ExecutionContext* c) { unwrap(c)->Release(); }
TF_AbstractOp* TF_NewAbstractOp(TF_ExecutionContext* c) {
return wrap((unwrap(c)->CreateOperation()));
}
void TF_DeleteAbstractOp(TF_AbstractOp* op) { unwrap(op)->Release(); }
void TF_DeleteAbstractTensor(TF_AbstractTensor* t) { unwrap(t)->Unref(); }
TF_OutputList* TF_NewOutputList() { return wrap(new OutputList); }
void TF_DeleteOutputList(TF_OutputList* o) { delete unwrap(o); }
void TF_OutputListSetNumOutputs(TF_OutputList* o, int num_outputs,
TF_Status* s) {
unwrap(o)->expected_num_outputs = num_outputs;
unwrap(o)->outputs.clear();
unwrap(o)->outputs.resize(num_outputs);
}
int TF_OutputListNumOutputs(TF_OutputList* o) {
return unwrap(o)->outputs.size();
}
TF_AbstractTensor* TF_OutputListGet(TF_OutputList* o, int i) {
return wrap(unwrap(o)->outputs[i]);
}
void TF_OutputListPushBack(TF_OutputList* o, TF_AbstractTensor* tensor,
TF_Status* s) {
unwrap(o)->outputs.push_back(unwrap(tensor));
}
void TF_AbstractOpSetOpType(TF_AbstractOp* op, const char* const op_type,
TF_Status* s) {
tsl::Set_TF_Status_from_Status(
s, unwrap(op)->Reset(op_type,
nullptr));
}
void TF_AbstractOpSetOpName(TF_AbstractOp* op, const char* const op_name,
TF_Status* s) {
TracingOperation* tracing_op = dyn_cast<TracingOperation>(unwrap(op));
if (!tracing_op) {
tsl::Set_TF_Status_from_Status(
s, tensorflow::errors::InvalidArgument(
"TF_AbstractOpSetOpName must be called on a TracingOperation."));
return;
}
tsl::Set_TF_Status_from_Status(s, tracing_op->SetOpName(op_name));
}
void TF_AbstractOpSetAttrType(TF_AbstractOp* op, const char* const attr_name,
TF_DataType value, TF_Status* s) {
Status status =
unwrap(op)->SetAttrType(attr_name, static_cast<DataType>(value));
TF_SetStatus(s, static_cast<TF_Code>(status.code()),
absl::StatusMessageAsCStr(status));
}
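// Feeds the inputs to the op one at a time, returning on the first failure,
// then executes with the number of outputs the caller declared on the
// output list.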
void TF_ExecuteOperation(TF_AbstractOp* op, int num_inputs,
TF_AbstractTensor* const* inputs, TF_OutputList* o,
TF_Status* s) {
for (int i = 0; i < num_inputs; i++) {
tsl::Set_TF_Status_from_Status(s, unwrap(op)->AddInput(unwrap(inputs[i])));
if (TF_GetCode(s) != TF_OK) {
return;
}
}
int num_outputs = unwrap(o)->expected_num_outputs;
tsl::Set_TF_Status_from_Status(
s, unwrap(op)->Execute(
absl::MakeSpan(reinterpret_cast<AbstractTensorHandle**>(
unwrap(o)->outputs.data()),
unwrap(o)->outputs.size()),
&num_outputs));
}
void TF_DeleteAbstractFunction(TF_AbstractFunction* func) {
unwrap(func)->Unref();
}
void TF_ExecutionContextRegisterFunction(TF_ExecutionContext* ctx,
TF_AbstractFunction* func,
TF_Status* s) {
tsl::Set_TF_Status_from_Status(s,
unwrap(ctx)->RegisterFunction(unwrap(func)));
} | #include "tensorflow/c/eager/c_api_unified_experimental.h"
#include <cstring>
#include <memory>
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
using tensorflow::Status;
using tensorflow::string;
using tensorflow::TF_StatusPtr;
namespace tensorflow {
namespace {
class UnifiedCAPI
: public ::testing::TestWithParam<std::tuple<const char*, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.message();
}
};
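// The parameterized fixture runs every test against the tracing
// implementation named by the first parameter, with TFRT enabled or
// disabled per the second parameter.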
TEST_P(UnifiedCAPI, TestBasicEager) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* t = TestScalarTensorHandle(eager_ctx, 2.0f);
TF_AbstractTensor* at =
TF_CreateAbstractTensorFromEagerTensor(t, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* op = TF_NewAbstractOp(ctx);
TF_AbstractOpSetOpType(op, "Add", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {at, at};
TF_OutputList* o = TF_NewOutputList();
TF_OutputListSetNumOutputs(o, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(op, 2, inputs, o, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(op);
TF_DeleteAbstractTensor(at);
ASSERT_EQ(1, TF_OutputListNumOutputs(o));
TF_AbstractTensor* result = TF_OutputListGet(o, 0);
TFE_TensorHandle* result_t =
TF_AbstractTensorGetEagerTensor(result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
float* result_value = static_cast<float*>(TF_TensorData(result_tensor));
EXPECT_EQ(*result_value, 4.0);
TF_DeleteTensor(result_tensor);
TF_DeleteAbstractTensor(result);
TF_DeleteOutputList(o);
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TestBasicEagerMatMul) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
float vals[] = {0.0f, 0.0f, 0.0f, 0.0f};
TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
TFE_TensorHandle* t =
TestMatrixTensorHandleWithInput(eager_ctx, vals, dims, num_dims);
TF_AbstractTensor* at = TF_CreateAbstractTensorFromEagerTensor(
t, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* op = TF_NewAbstractOp(ctx);
TF_AbstractOpSetOpType(op, "MatMul", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {at, at};
TF_OutputList* o = TF_NewOutputList();
TF_OutputListSetNumOutputs(o, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(op, 2, inputs, o, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(op);
TF_DeleteAbstractTensor(at);
ASSERT_EQ(1, TF_OutputListNumOutputs(o));
TF_AbstractTensor* result = TF_OutputListGet(o, 0);
TFE_TensorHandle* result_t =
TF_AbstractTensorGetEagerTensor(result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
float result_data[4] = {0};
memcpy(&result_data[0], TF_TensorData(result_tensor),
TF_TensorByteSize(result_tensor));
int data_len = 4;
for (int i = 0; i < data_len; i++) {
EXPECT_EQ(result_data[i], 0);
}
TF_DeleteTensor(result_tensor);
TF_DeleteAbstractTensor(result);
TF_DeleteOutputList(o);
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TestBasicEagerMatMul2) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
float vals1[] = {1.0f, 2.0f, 3.0f, 4.0f};
TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
TFE_TensorHandle* t1 =
TestMatrixTensorHandleWithInput(eager_ctx, vals1, dims, num_dims);
TF_AbstractTensor* at1 = TF_CreateAbstractTensorFromEagerTensor(
t1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
float vals2[] = {5.0f, 6.0f, 7.0f, 8.0f};
TFE_TensorHandle* t2 =
TestMatrixTensorHandleWithInput(eager_ctx, vals2, dims, num_dims);
TF_AbstractTensor* at2 = TF_CreateAbstractTensorFromEagerTensor(
t2, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* op = TF_NewAbstractOp(ctx);
TF_AbstractOpSetOpType(op, "MatMul", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {at1, at2};
TF_OutputList* o = TF_NewOutputList();
TF_OutputListSetNumOutputs(o, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(op, 2, inputs, o, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(op);
TF_DeleteAbstractTensor(at1);
TF_DeleteAbstractTensor(at2);
ASSERT_EQ(1, TF_OutputListNumOutputs(o));
TF_AbstractTensor* result = TF_OutputListGet(o, 0);
TFE_TensorHandle* result_t =
TF_AbstractTensorGetEagerTensor(result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
float result_data[4] = {0};
memcpy(&result_data[0], TF_TensorData(result_tensor),
TF_TensorByteSize(result_tensor));
float e_vals[] = {19.0f, 22.0f, 43.0f, 50.0f};
int data_len = 4;
for (int i = 0; i < data_len; i++) {
EXPECT_EQ(result_data[i], e_vals[i]);
}
TF_DeleteTensor(result_tensor);
TF_DeleteAbstractTensor(result);
TF_DeleteOutputList(o);
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TestBasicEagerMatAdd) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* ctx = TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
float vals1[] = {1.0f, 2.0f, 3.0f, 4.0f};
TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx, status.get());
TFE_TensorHandle* t1 =
TestMatrixTensorHandleWithInput(eager_ctx, vals1, dims, num_dims);
TF_AbstractTensor* at1 = TF_CreateAbstractTensorFromEagerTensor(
t1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
float vals2[] = {5.0f, 6.0f, 7.0f, 8.0f};
TFE_TensorHandle* t2 =
TestMatrixTensorHandleWithInput(eager_ctx, vals2, dims, num_dims);
TF_AbstractTensor* at2 = TF_CreateAbstractTensorFromEagerTensor(
t2, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* op = TF_NewAbstractOp(ctx);
TF_AbstractOpSetOpType(op, "Add", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {at1, at2};
TF_OutputList* o = TF_NewOutputList();
TF_OutputListSetNumOutputs(o, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(op, 2, inputs, o, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(op);
TF_DeleteAbstractTensor(at1);
TF_DeleteAbstractTensor(at2);
ASSERT_EQ(1, TF_OutputListNumOutputs(o));
TF_AbstractTensor* result = TF_OutputListGet(o, 0);
TFE_TensorHandle* result_t =
TF_AbstractTensorGetEagerTensor(result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* result_tensor = TFE_TensorHandleResolve(result_t, status.get());
float result_data[4] = {0};
memcpy(&result_data[0], TF_TensorData(result_tensor),
TF_TensorByteSize(result_tensor));
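  // Expected elementwise sum of [[1, 2], [3, 4]] and [[5, 6], [7, 8]]:
  //   [[6, 8], [10, 12]]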
float e_vals[] = {6.0f, 8.0f, 10.0f, 12.0f};
int data_len = 4;
for (int i = 0; i < data_len; i++) {
EXPECT_EQ(result_data[i], e_vals[i]);
}
TF_DeleteTensor(result_tensor);
TF_DeleteAbstractTensor(result);
TF_DeleteOutputList(o);
TF_DeleteExecutionContext(ctx);
}
TEST_P(UnifiedCAPI, TestBasicGraph) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
string fn_name = "double";
TF_ExecutionContext* graph_ctx =
TF_CreateFunction(fn_name.c_str(), status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* placeholder_t =
TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpName(add_op, "my_add", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {placeholder_t, placeholder_t};
TF_OutputList* add_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto outs = unwrap(add_outputs);
auto h = outs->outputs[0];
ASSERT_NE(h, nullptr);
ASSERT_EQ(h->FullType().type_id(), TFT_UNSET);
ASSERT_EQ(unwrap(inputs[0])->FullType().type_id(), TFT_UNSET);
TF_DeleteAbstractOp(add_op);
TF_AbstractFunction* func =
TF_FinalizeFunction(graph_ctx, add_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractTensor(TF_OutputListGet(add_outputs, 0));
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
TF_ExecutionContext* eager_execution_ctx =
TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_Context* eager_ctx =
TF_ExecutionContextGetTFEContext(eager_execution_ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* input_eager = TestScalarTensorHandle(eager_ctx, 2.0f);
TF_AbstractTensor* input_t =
TF_CreateAbstractTensorFromEagerTensor(input_eager, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(fn_op, 1, &input_t, add_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(1, TF_OutputListNumOutputs(add_outputs));
TF_AbstractTensor* final_result = TF_OutputListGet(add_outputs, 0);
TFE_TensorHandle* final =
TF_AbstractTensorGetEagerTensor(final_result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* f_t = TFE_TensorHandleResolve(final, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
float* f_value = static_cast<float*>(TF_TensorData(f_t));
ASSERT_EQ(*f_value, 4.0);
TF_DeleteOutputList(add_outputs);
TF_DeleteAbstractOp(fn_op);
TF_DeleteAbstractTensor(input_t);
TF_DeleteAbstractTensor(final_result);
TF_DeleteAbstractTensor(placeholder_t);
TF_DeleteTensor(f_t);
TF_DeleteAbstractFunction(func);
TF_DeleteExecutionContext(eager_execution_ctx);
}
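// Traces a function that multiplies its single matrix argument by itself, then
// runs it eagerly on a 2x2 all-ones matrix; every output entry is the dot
// product of a row of ones with a column of ones, i.e. 1*1 + 1*1 = 2.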
TEST_P(UnifiedCAPI, TestBasicGraphMatMul) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
string fn_name = "matrix_multiply";
TF_ExecutionContext* graph_ctx =
TF_CreateFunction(fn_name.c_str(), status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* placeholder_t =
TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
auto* matmul_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(matmul_op, "MatMul", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOpSetOpName(matmul_op, "my_matmul", status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractTensor* inputs[2] = {placeholder_t, placeholder_t};
TF_OutputList* mm_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(mm_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(matmul_op, 2, inputs, mm_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeleteAbstractOp(matmul_op);
TF_AbstractFunction* func =
TF_FinalizeFunction(graph_ctx, mm_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContext* eager_execution_ctx =
TF_NewEagerExecutionContext(opts, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_Context* eager_ctx =
TF_ExecutionContextGetTFEContext(eager_execution_ctx, status.get());
float vals[] = {1.0f, 1.0f, 1.0f, 1.0f};
int64_t dims[] = {2, 2};
int num_dims = sizeof(dims) / sizeof(dims[0]);
TFE_TensorHandle* input_eager =
TestMatrixTensorHandleWithInput(eager_ctx, vals, dims, num_dims);
TF_AbstractTensor* input_t =
TF_CreateAbstractTensorFromEagerTensor(input_eager, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_OutputListSetNumOutputs(mm_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(fn_op, 1, &input_t, mm_outputs, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(1, TF_OutputListNumOutputs(mm_outputs));
TF_AbstractTensor* final_result = TF_OutputListGet(mm_outputs, 0);
TFE_TensorHandle* final =
TF_AbstractTensorGetEagerTensor(final_result, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* f_t = TFE_TensorHandleResolve(final, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
float result_data[4] = {0};
memcpy(&result_data[0], TF_TensorData(f_t), TF_TensorByteSize(f_t));
int data_len = 4;
for (int i = 0; i < data_len; i++) {
ASSERT_EQ(result_data[i], 2.0f);
}
TF_DeleteAbstractTensor(final_result);
TF_DeleteOutputList(mm_outputs);
TF_DeleteAbstractTensor(placeholder_t);
TF_DeleteAbstractOp(fn_op);
TF_DeleteAbstractTensor(input_t);
TF_DeleteTensor(f_t);
TF_DeleteAbstractFunction(func);
TF_DeleteExecutionContext(eager_execution_ctx);
}
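// Traces a two-output function (A + B and B + B) and runs it eagerly with the
// scalars A = 2 and B = 3, expecting the outputs 5 and 6.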
TEST_P(UnifiedCAPI, TestMultiOutputGraph) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_Status* s = status.get();
string fn_name = "two_adds";
TF_ExecutionContext* graph_ctx = TF_CreateFunction(fn_name.c_str(), s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
auto* arg0 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
auto* arg1 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* add_output1;
{
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(add_op, "my_add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {arg0, arg1};
TF_OutputList* add_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(add_op);
add_output1 = TF_OutputListGet(add_outputs, 0);
TF_DeleteOutputList(add_outputs);
}
TF_AbstractTensor* add_output2;
{
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(add_op, "my_add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {arg1, arg1};
TF_OutputList* add_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(add_op);
add_output2 = TF_OutputListGet(add_outputs, 0);
TF_DeleteOutputList(add_outputs);
}
TF_DeleteAbstractTensor(arg0);
TF_DeleteAbstractTensor(arg1);
TF_AbstractFunction* func;
{
TF_OutputList* func_outputs = TF_NewOutputList();
TF_OutputListPushBack(func_outputs, add_output1, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_OutputListPushBack(func_outputs, add_output2, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
func = TF_FinalizeFunction(graph_ctx, func_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractTensor(add_output1);
TF_DeleteAbstractTensor(add_output2);
TF_DeleteOutputList(func_outputs);
}
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, std::get<1>(GetParam()));
TF_ExecutionContext* eager_execution_ctx =
TF_NewEagerExecutionContext(opts, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TFE_DeleteContextOptions(opts);
TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx);
TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
std::vector<TF_AbstractTensor*> func_args;
{
TFE_Context* eager_ctx =
TF_ExecutionContextGetTFEContext(eager_execution_ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* input_eager = TestScalarTensorHandle(eager_ctx, 2.0f);
func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
input_eager = TestScalarTensorHandle(eager_ctx, 3.0f);
func_args.push_back(TF_CreateAbstractTensorFromEagerTensor(input_eager, s));
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
}
TF_OutputList* func_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(func_outputs, 2, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_ExecuteOperation(fn_op, func_args.size(), func_args.data(), func_outputs,
s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(fn_op);
for (TF_AbstractTensor* t : func_args) TF_DeleteAbstractTensor(t);
ASSERT_EQ(2, TF_OutputListNumOutputs(func_outputs));
float results[2];
for (int idx = 0; idx < 2; ++idx) {
TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
TFE_TensorHandle* handle = TF_AbstractTensorGetEagerTensor(result, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_Tensor* f_t = TFE_TensorHandleResolve(handle, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
results[idx] = *static_cast<float*>(TF_TensorData(f_t));
TF_DeleteTensor(f_t);
}
ASSERT_EQ(results[0], 5.0);
ASSERT_EQ(results[1], 6.0);
for (int idx = 0; idx < 2; ++idx) {
TF_AbstractTensor* result = TF_OutputListGet(func_outputs, idx);
TF_DeleteAbstractTensor(result);
}
TF_DeleteOutputList(func_outputs);
TF_DeleteExecutionContext(eager_execution_ctx);
TF_DeleteAbstractFunction(func);
}
TEST_P(UnifiedCAPI, TestMultiOutputGraphMatMul) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_Status* s = status.get();
string fn_name = "two_adds_and_matmul";
TF_ExecutionContext* graph_ctx = TF_CreateFunction(fn_name.c_str(), s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
auto* arg0 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
auto* arg1 = TF_AddFunctionParameter(graph_ctx, TF_FLOAT, {-1, nullptr}, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* add_output1;
{
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(add_op, "my_add1", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {arg0, arg1};
TF_OutputList* add_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(add_op);
add_output1 = TF_OutputListGet(add_outputs, 0);
TF_DeleteOutputList(add_outputs);
}
TF_AbstractTensor* add_output2;
{
auto* add_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add_op, "Add", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(add_op, "my_add2", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {arg1, arg1};
TF_OutputList* add_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(add_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(add_op, 2, inputs, add_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(add_op);
add_output2 = TF_OutputListGet(add_outputs, 0);
TF_DeleteOutputList(add_outputs);
}
TF_AbstractTensor* mm_output;
{
auto* mm_op = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(mm_op, "MatMul", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractOpSetOpName(mm_op, "mm", s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_AbstractTensor* inputs[2] = {add_output1, add_output2};
TF_OutputList* mm_outputs = TF_NewOutputList();
TF_OutputListSetNumOutputs(mm_outputs, 1, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_ExecuteOperation(mm_op, 2, inputs, mm_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteAbstractOp(mm_op);
mm_output = TF_OutputListGet(mm_outputs, 0);
TF_DeleteOutputList(mm_outputs);
}
TF_AbstractFunction* func;
{
TF_OutputList* func_outputs = TF_NewOutputList();
TF_OutputListPushBack(func_outputs, add_output1, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_OutputListPushBack(func_outputs, add_output2, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_OutputListPushBack(func_outputs, mm_output, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
func = TF_FinalizeFunction(graph_ctx, func_outputs, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
TF_DeleteOutputList(func_outputs);
}
  /**
   * So far we have traced this function:
   *
   *   def two_adds_and_mm(A, B):
   *     my_add1 = A + B
   *     my_add2 = B + B
   *     mm = tf.MatMul(my_add1, my_add2)
   *     return my_add1, my_add2, mm
   *
   * Now we will execute this function with an eager context:
   *
   *   A = [[0, 1], [1, 0]]
   *   B = [[1, 0], [0, 1]]
   *
   *   output1, output2, output3 = two_adds_and_mm(A, B)
   *
   * We expect the outputs:
   *
   *   output1 = [[1, 1], [1, 1]]
   *   output2 = [[2, 0], [0, 2]]
   *   output3 = [[2, 2], [2, 2]]
   */ |
1,271 | cpp | tensorflow/tensorflow | gradient_checker | tensorflow/cc/framework/gradient_checker.cc | tensorflow/cc/framework/gradient_checker_test.cc | #ifndef TENSORFLOW_CC_FRAMEWORK_GRADIENT_CHECKER_H_
#define TENSORFLOW_CC_FRAMEWORK_GRADIENT_CHECKER_H_
#include <vector>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
template <typename X_T, typename Y_T, typename JAC_T>
Status ComputeGradientError(const Scope& scope, const OutputList& xs,
const std::vector<TensorShape>& x_shapes,
const OutputList& ys,
const std::vector<TensorShape>& y_shapes,
JAC_T* max_error);
template <typename X_T, typename Y_T, typename JAC_T>
Status ComputeGradientError(const Scope& scope, const Output& x,
const Tensor& x_init_value, const Output& y,
const TensorShape& y_shape, JAC_T* max_error);
}
#endif
#include "tensorflow/cc/framework/gradient_checker.h"
#include <algorithm>
#include <utility>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace {
template <typename T>
struct BaseUnitsForType {};
#define SET_BASE_UNITS_FOR_TYPE(TYPE, INIT) \
template <> \
struct BaseUnitsForType<TYPE> { \
static const std::vector<TYPE>& values() { \
static std::vector<TYPE>* units = new std::vector<TYPE> INIT; \
return *units; \
} \
}
SET_BASE_UNITS_FOR_TYPE(float, {1});
SET_BASE_UNITS_FOR_TYPE(double, {1});
SET_BASE_UNITS_FOR_TYPE(complex64, ({{1, 0}, {0, 1}}));
SET_BASE_UNITS_FOR_TYPE(complex128, ({{1, 0}, {0, 1}}));
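// For real types a single perturbation direction (1) suffices; complex types
// need two base units (1 and i), one per real dimension of the complex plane.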
template <typename T, typename JAC_T>
typename std::enable_if<std::is_floating_point<T>::value>::type SetJacobian(
typename TTypes<JAC_T>::Matrix* jacobian, const int row, const int col,
const T& value, const bool expand_by_row) {
(*jacobian)(row, col) = JAC_T{value};
}
template <typename T, typename JAC_T>
typename std::enable_if<is_complex<T>::value>::type SetJacobian(
typename TTypes<JAC_T>::Matrix* jacobian, const int row, const int col,
const T& value, const bool expand_by_row) {
(*jacobian)(row, col) = JAC_T{value.real()};
if (expand_by_row) {
(*jacobian)(row + 1, col) = JAC_T{value.imag()};
} else {
(*jacobian)(row, col + 1) = JAC_T{value.imag()};
}
}
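// A complex value expands into two real Jacobian entries (real and imaginary
// parts), placed in adjacent rows or columns depending on which axis of the
// Jacobian the complex type lives on.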
template <typename T>
struct JacobianStride {};
#define SET_JACOBIAN_STRIDE(TYPE, VALUE) \
template <> \
struct JacobianStride<TYPE> { \
static constexpr int value = VALUE; \
}
SET_JACOBIAN_STRIDE(float, 1);
SET_JACOBIAN_STRIDE(double, 1);
SET_JACOBIAN_STRIDE(complex64, 2);
SET_JACOBIAN_STRIDE(complex128, 2);
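// JacobianStride is the number of real scalars each element expands to: 1 for
// float/double, 2 for complex types.
// The function below computes the transposed theoretical Jacobian by feeding
// one-hot upstream gradients through the symbolic gradient graph, one output
// element and one base unit at a time.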
template <typename X_T, typename Y_T, typename JAC_T>
Status ComputeTheoreticalJacobianTranspose(
const Scope& scope, const OutputList& xs,
const std::vector<TensorShape>& x_shapes,
const std::vector<Tensor>& x_datas, const OutputList& ys,
const std::vector<TensorShape>& y_shapes,
std::vector<Tensor>* jacobian_ts) {
size_t y_num = y_shapes.size();
size_t x_num = x_shapes.size();
OutputList dys;
dys.reserve(y_shapes.size());
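  // Note: every dy is cast to ys[0].type(), which assumes all outputs share a
  // single dtype.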
for (const auto& y_shape : y_shapes) {
dys.push_back(
ops::Cast(scope, ops::Const(scope, 1.0, y_shape), ys[0].type()));
}
OutputList dxs;
TF_RETURN_IF_ERROR(AddSymbolicGradients(scope, ys, xs, dys, &dxs));
std::vector<Tensor> dy_datas(y_num);
for (int i = 0; i < y_num; i++) {
dy_datas[i] = Tensor(ys[i].type(), y_shapes[i]);
auto dy_data_flat = dy_datas[i].flat<Y_T>();
dy_data_flat.setZero();
}
ClientSession::FeedType feed_list;
for (int i = 0; i < x_num; i++) {
feed_list.insert({xs[i], x_datas[i]});
}
for (int i = 0; i < y_num; i++) {
feed_list.insert({dys[i], dy_datas[i]});
}
const int x_stride = JacobianStride<X_T>::value;
const int y_stride = JacobianStride<Y_T>::value;
ClientSession session(scope);
for (int y_idx = 0; y_idx < y_num; y_idx++) {
auto dy_data_flat = dy_datas[y_idx].flat<Y_T>();
const int64_t dy_size = y_shapes[y_idx].num_elements();
for (int c = 0; c < dy_size; ++c) {
int unit_dimension = 0;
for (Y_T unit : BaseUnitsForType<Y_T>::values()) {
dy_data_flat(c) = unit;
std::vector<Tensor> dxout;
TF_RETURN_IF_ERROR(session.Run(feed_list, dxs, &dxout));
for (int x_idx = 0; x_idx < x_num; x_idx++) {
if (x_shapes[x_idx] != dxout[x_idx].shape()) {
return errors::Internal("Gradient for input ", x_idx,
" expected shape ",
x_shapes[x_idx].DebugString(), " but was ",
dxout[x_idx].shape().DebugString());
}
const int64_t x_size = x_shapes[x_idx].num_elements();
auto jacobian = (*jacobian_ts)[x_idx * y_num + y_idx].matrix<JAC_T>();
auto dx_flat = dxout[x_idx].flat<X_T>();
for (int r = 0; r < x_size; ++r) {
SetJacobian<X_T, JAC_T>(&jacobian, r * x_stride,
c * y_stride + unit_dimension, dx_flat(r),
true );
}
}
dy_data_flat(c) = Y_T{0};
unit_dimension++;
}
}
}
return absl::OkStatus();
}
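// Runs the graph once with the given feeds and deep-copies any output that
// shares a buffer with an input, so that later in-place perturbations of the
// inputs cannot corrupt previously fetched results.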
Status EvaluateGraph(ClientSession* session, const OutputList& xs,
const OutputList& ys, std::vector<Tensor>* x_datas,
std::vector<Tensor>* y_datas) {
ClientSession::FeedType feed_list;
for (int i = 0; i < x_datas->size(); i++) {
feed_list.insert({xs[i], (*x_datas)[i]});
}
TF_RETURN_IF_ERROR(session->Run(feed_list, ys, y_datas));
for (int y_idx = 0; y_idx < y_datas->size(); y_idx++) {
for (int x_idx = 0; x_idx < x_datas->size(); x_idx++) {
Tensor y_data = (*y_datas)[y_idx];
if (y_data.SharesBufferWith((*x_datas)[x_idx])) {
(*y_datas)[y_idx] = tensor::DeepCopy(y_data);
}
}
}
return absl::OkStatus();
}
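// Approximates the Jacobian with central differences: each input element is
// perturbed by +/-delta and the outputs are differenced, giving
// dy/dx ~= (y(x + delta) - y(x - delta)) / (2 * delta).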
template <typename X_T, typename Y_T, typename JAC_T>
Status ComputeNumericJacobianTranspose(const Scope& scope, const OutputList& xs,
const std::vector<TensorShape>& x_shapes,
const OutputList& ys,
const std::vector<TensorShape>& y_shapes,
const JAC_T delta,
std::vector<Tensor>* x_datas,
std::vector<Tensor>* jacobian_ts) {
size_t y_num = y_shapes.size();
size_t x_num = x_shapes.size();
const int x_stride = JacobianStride<X_T>::value;
const int y_stride = JacobianStride<Y_T>::value;
ClientSession session(scope);
for (int x_idx = 0; x_idx < x_num; x_idx++) {
auto x_data_flat = (*x_datas)[x_idx].flat<X_T>();
const int64_t x_size = x_shapes[x_idx].num_elements();
for (int r = 0; r < x_size; ++r) {
int unit_dimension = 0;
for (X_T unit : BaseUnitsForType<X_T>::values()) {
X_T x_delta = unit * X_T{delta};
X_T v = x_data_flat(r);
x_data_flat(r) = v + x_delta;
std::vector<Tensor> y_pos;
TF_RETURN_IF_ERROR(EvaluateGraph(&session, xs, ys, x_datas, &y_pos));
x_data_flat(r) = v - x_delta;
std::vector<Tensor> y_neg;
TF_RETURN_IF_ERROR(EvaluateGraph(&session, xs, ys, x_datas, &y_neg));
for (int y_idx = 0; y_idx < y_num; y_idx++) {
auto y_pos_flat = y_pos[y_idx].flat<Y_T>();
auto y_neg_flat = y_neg[y_idx].flat<Y_T>();
const int64_t y_size = y_shapes[y_idx].num_elements();
const Y_T scale = 2 * delta;
auto jacobian = (*jacobian_ts)[x_idx * y_num + y_idx].matrix<JAC_T>();
for (int c = 0; c < y_size; ++c) {
SetJacobian<Y_T, JAC_T>(&jacobian, r * x_stride + unit_dimension,
c * y_stride,
(y_pos_flat(c) - y_neg_flat(c)) / scale,
false );
}
}
x_data_flat(r) = v;
unit_dimension++;
}
}
}
return absl::OkStatus();
}
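// Allocates one zero-initialized Jacobian matrix per (x, y) pair, with sizes
// measured in real scalars (complex elements occupy two rows or columns).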
template <typename X_T, typename Y_T, typename JAC_T>
void InitJacobians(const OutputList& xs,
const std::vector<TensorShape>& x_shapes,
const std::vector<TensorShape>& y_shapes,
std::vector<Tensor>* jacobians) {
const size_t y_num = y_shapes.size();
const size_t x_num = x_shapes.size();
const DataType jacobian_type = DataTypeToEnum<JAC_T>::v();
jacobians->resize(y_num * x_num);
for (int x_idx = 0; x_idx < x_num; x_idx++) {
const int64_t x_size =
x_shapes[x_idx].num_elements() * JacobianStride<X_T>::value;
for (int y_idx = 0; y_idx < y_num; y_idx++) {
const int64_t y_size =
y_shapes[y_idx].num_elements() * JacobianStride<Y_T>::value;
Tensor jacobian_t(jacobian_type, {x_size, y_size});
auto jacobian_t_flat = jacobian_t.flat<JAC_T>();
jacobian_t_flat.setZero();
(*jacobians)[x_idx * y_num + y_idx] = std::move(jacobian_t);
}
}
}
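// Computes both Jacobians and reports the largest absolute difference between
// corresponding theoretical and numeric entries; a NaN difference
// short-circuits and is returned as the error.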
template <typename X_T, typename Y_T, typename JAC_T>
Status ComputeGradientErrorInternal(const Scope& scope, const OutputList& xs,
const std::vector<TensorShape>& x_shapes,
const OutputList& ys,
const std::vector<TensorShape>& y_shapes,
std::vector<Tensor>* x_datas,
JAC_T* max_error) {
std::vector<Tensor> jacobian_ts;
InitJacobians<X_T, Y_T, JAC_T>(xs, x_shapes, y_shapes, &jacobian_ts);
TF_RETURN_IF_ERROR((ComputeTheoreticalJacobianTranspose<X_T, Y_T, JAC_T>(
scope, xs, x_shapes, *x_datas, ys, y_shapes, &jacobian_ts)));
std::vector<Tensor> jacobian_ns;
InitJacobians<X_T, Y_T, JAC_T>(xs, x_shapes, y_shapes, &jacobian_ns);
TF_RETURN_IF_ERROR((ComputeNumericJacobianTranspose<X_T, Y_T, JAC_T>(
scope, xs, x_shapes, ys, y_shapes, JAC_T{1e-3f}, x_datas, &jacobian_ns)));
for (int i = 0; i < jacobian_ts.size(); i++) {
*max_error = 0.0;
auto jac_t = jacobian_ts[i].matrix<JAC_T>();
auto jac_n = jacobian_ns[i].matrix<JAC_T>();
for (int r = 0; r < jacobian_ts[i].dim_size(0); ++r) {
for (int c = 0; c < jacobian_ts[i].dim_size(1); ++c) {
auto cur_error = std::fabs(jac_t(r, c) - jac_n(r, c));
if (std::isnan(cur_error)) {
*max_error = cur_error;
return absl::OkStatus();
}
*max_error = std::max(*max_error, cur_error);
}
}
}
return absl::OkStatus();
}
}
template <typename X_T, typename Y_T, typename JAC_T>
Status ComputeGradientError(const Scope& scope, const OutputList& xs,
const std::vector<TensorShape>& x_shapes,
const OutputList& ys,
const std::vector<TensorShape>& y_shapes,
JAC_T* max_error) {
if (xs.size() != x_shapes.size()) {
return errors::InvalidArgument("xs(size ", xs.size(),
") and x_shapes(size ", x_shapes.size(),
") must be the same size.");
}
if (ys.size() != y_shapes.size()) {
return errors::InvalidArgument("ys(size ", ys.size(),
") and y_shapes(size ", y_shapes.size(),
") must be the same size.");
}
std::vector<Tensor> x_datas(x_shapes.size());
for (int i = 0; i < x_shapes.size(); i++) {
x_datas[i] = Tensor(xs[i].type(), x_shapes[i]);
auto x_data_flat = x_datas[i].flat<X_T>();
x_data_flat.setRandom();
}
return ComputeGradientErrorInternal<X_T, Y_T, JAC_T>(
scope, xs, x_shapes, ys, y_shapes, &x_datas, max_error);
}
template <typename X_T, typename Y_T, typename JAC_T>
Status ComputeGradientError(const Scope& scope, const Output& x,
const Tensor& x_init_value, const Output& y,
const TensorShape& y_shape, JAC_T* max_error) {
std::vector<Tensor> x_datas(1, Tensor(x_init_value));
return ComputeGradientErrorInternal<X_T, Y_T, JAC_T>(
scope, {x}, {x_datas[0].shape()}, {y}, {y_shape}, &x_datas, max_error);
}
#define INSTANTIATE_GRAD_ERR_TYPE(X_T, Y_T, JAC_T) \
template Status ComputeGradientError<X_T, Y_T, JAC_T>( \
const Scope& scope, const OutputList& xs, \
const std::vector<TensorShape>& x_shapes, const OutputList& ys, \
const std::vector<TensorShape>& y_shapes, JAC_T* max_error); \
template Status ComputeGradientError<X_T, Y_T, JAC_T>( \
const Scope& scope, const Output& x, const Tensor& x_init_value, \
const Output& y, const TensorShape& y_shape, JAC_T* max_error);
INSTANTIATE_GRAD_ERR_TYPE(float, float, float);
INSTANTIATE_GRAD_ERR_TYPE(double, float, double);
INSTANTIATE_GRAD_ERR_TYPE(double, double, double);
INSTANTIATE_GRAD_ERR_TYPE(complex64, float, float);
INSTANTIATE_GRAD_ERR_TYPE(float, complex64, float);
INSTANTIATE_GRAD_ERR_TYPE(complex64, complex64, float);
INSTANTIATE_GRAD_ERR_TYPE(complex128, complex128, double);
} | #include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
using ops::Complex;
using ops::Const;
using ops::Div;
using ops::MatMul;
using ops::Placeholder;
using ops::Real;
using ops::Split;
using ops::Square;
using ops::Stack;
using ops::Sub;
using ops::Unstack;
TEST(GradientCheckerTest, BasicFloat) {
Scope scope = Scope::NewRootScope();
TensorShape shape({2, 4, 3});
auto x = Placeholder(scope, DT_FLOAT, Placeholder::Shape(shape));
auto y = Square(scope, x);
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope, {x}, {shape}, {y}, {shape}, &max_error)));
EXPECT_LT(max_error, 1e-4);
}
TEST(GradientCheckerTest, BasicDouble) {
Scope scope = Scope::NewRootScope();
TensorShape shape({2, 4, 3});
auto x = Placeholder(scope, DT_DOUBLE, Placeholder::Shape(shape));
auto y = Square(scope, x);
double max_error;
TF_ASSERT_OK((ComputeGradientError<double, double, double>(
scope, {x}, {shape}, {y}, {shape}, &max_error)));
EXPECT_LT(max_error, 1e-10);
}
TEST(GradientCheckerTest, BasicComplex64) {
Scope scope = Scope::NewRootScope();
TensorShape shape({2, 4, 3});
auto x = Placeholder(scope, DT_COMPLEX64, Placeholder::Shape(shape));
auto y = Square(scope, x);
float max_error;
TF_ASSERT_OK((ComputeGradientError<complex64, complex64, float>(
scope, {x}, {shape}, {y}, {shape}, &max_error)));
EXPECT_LT(max_error, 1e-4);
}
TEST(GradientCheckerTest, BasicComplex128) {
Scope scope = Scope::NewRootScope();
TensorShape shape({2, 4, 3});
auto x = Placeholder(scope, DT_COMPLEX128, Placeholder::Shape(shape));
auto y = Square(scope, x);
double max_error;
TF_ASSERT_OK((ComputeGradientError<complex128, complex128, double>(
scope, {x}, {shape}, {y}, {shape}, &max_error)));
EXPECT_LT(max_error, 1e-10);
}
TEST(GradientCheckerTest, FloatToComplex64) {
Scope scope = Scope::NewRootScope();
TensorShape shape({2, 4, 3});
auto x = Placeholder(scope, DT_FLOAT, Placeholder::Shape(shape));
auto y = Complex(scope, x, x);
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, complex64, float>(
scope, {x}, {shape}, {y}, {shape}, &max_error)));
EXPECT_LT(max_error, 1e-4);
}
TEST(GradientCheckerTest, Complex64ToFloat) {
Scope scope = Scope::NewRootScope();
TensorShape shape({2, 4, 3});
auto x = Placeholder(scope, DT_COMPLEX64, Placeholder::Shape(shape));
auto y = Real(scope, x);
float max_error;
TF_ASSERT_OK((ComputeGradientError<complex64, float, float>(
scope, {x}, {shape}, {y}, {shape}, &max_error)));
EXPECT_LT(max_error, 1e-4);
}
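// y = x / (x - x) divides by zero everywhere, so the gradients are not finite
// and the checker is expected to report a NaN max_error.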
TEST(GradientCheckerTest, BasicNan) {
Scope scope = Scope::NewRootScope();
TensorShape shape({2, 4, 3});
auto x = Placeholder(scope, DT_FLOAT, Placeholder::Shape(shape));
auto y = Div(scope, x, Sub(scope, x, x));
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope, {x}, {shape}, {y}, {shape}, &max_error)));
EXPECT_TRUE(std::isnan(max_error));
}
TEST(GradientCheckerTest, MatMulGrad) {
Scope scope = Scope::NewRootScope();
TensorShape x_shape({4, 3});
TensorShape y_shape({3, 2});
TensorShape z_shape({4, 2});
auto x = Placeholder(scope, DT_DOUBLE, Placeholder::Shape(x_shape));
auto y = Const(scope, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}, y_shape);
auto z = MatMul(scope, x, y);
double max_error;
TF_ASSERT_OK((ComputeGradientError<double, double, double>(
scope, {x}, {x_shape}, {z}, {z_shape}, &max_error)));
EXPECT_LT(max_error, 1e-10);
}
TEST(GradientCheckerTest, SplitGrad) {
Scope scope = Scope::NewRootScope();
TensorShape x_shape({5, 2});
auto x = Placeholder(scope, DT_DOUBLE, Placeholder::Shape(x_shape));
auto split_dim = Const(scope, 1, {});
auto y = Split(scope, split_dim, x, 2);
TensorShape y_shape = TensorShape({5, 1});
double max_error;
TF_ASSERT_OK((ComputeGradientError<double, double, double>(
scope, {x}, {x_shape}, y.output, {y_shape, y_shape}, &max_error)));
EXPECT_LT(max_error, 1e-10);
}
TEST(GradientCheckerTest, StackGrad) {
Scope scope = Scope::NewRootScope();
TensorShape x_shape({1, 2, 3});
std::vector<Output> xs;
xs.push_back(Placeholder(scope, DT_DOUBLE, Placeholder::Shape(x_shape)));
xs.push_back(Placeholder(scope, DT_DOUBLE, Placeholder::Shape(x_shape)));
auto y = Stack(scope, xs, Stack::Axis(0));
TensorShape y_shape({2, 1, 2, 3});
double max_error;
TF_ASSERT_OK((ComputeGradientError<double, double, double>(
scope, xs, {x_shape, x_shape}, {y}, {y_shape}, &max_error)));
EXPECT_LT(max_error, 1e-10);
}
TEST(GradientCheckerTest, StackUnstackGrad) {
Scope scope = Scope::NewRootScope();
TensorShape shape({1, 2, 3});
std::vector<Output> xs;
xs.push_back(Placeholder(scope, DT_DOUBLE, Placeholder::Shape(shape)));
xs.push_back(Placeholder(scope, DT_DOUBLE, Placeholder::Shape(shape)));
auto tmp = Stack(scope, xs, Stack::Axis(0));
auto y = Unstack(scope, tmp, 2, Unstack::Axis(0));
double max_error;
TF_ASSERT_OK((ComputeGradientError<double, double, double>(
scope, xs, {shape, shape}, y.output, {shape, shape}, &max_error)));
EXPECT_LT(max_error, 1e-10);
}
}
} |
1,272 | cpp | tensorflow/tensorflow | parallel_device | tensorflow/c/eager/parallel_device/parallel_device.cc | tensorflow/c/eager/parallel_device/parallel_device_test.cc | #ifndef TENSORFLOW_C_EAGER_PARALLEL_DEVICE_PARALLEL_DEVICE_H_
#define TENSORFLOW_C_EAGER_PARALLEL_DEVICE_PARALLEL_DEVICE_H_
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
namespace tensorflow {
namespace parallel_device {
void AllocateParallelDevice(const char* device_name,
const char* const* underlying_devices,
int num_underlying_devices,
TFE_CustomDevice* device, void** device_info);
}
}
#endif
#include "tensorflow/c/eager/parallel_device/parallel_device.h"
#include <cstring>
#include <memory>
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/parallel_device/parallel_device_lib.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
namespace tensorflow {
namespace parallel_device {
namespace {
class OpDeleter {
public:
void operator()(TFE_Op* to_delete) const { TFE_DeleteOp(to_delete); }
};
using OpPtr = std::unique_ptr<TFE_Op, OpDeleter>;
using MaybeParallelTensorOwned =
absl::variant<std::unique_ptr<ParallelTensor>, TensorHandlePtr>;
using MaybeParallelTensorUnowned =
absl::variant<ParallelTensor*, TFE_TensorHandle*>;
class NamedParallelDevice {
public:
NamedParallelDevice(const std::string& name,
std::unique_ptr<ParallelDevice> parallel_device)
: device_name_(name), parallel_device_(std::move(parallel_device)) {}
const std::string& name() const { return device_name_; }
const ParallelDevice& device() const { return *parallel_device_; }
private:
std::string device_name_;
std::unique_ptr<ParallelDevice> parallel_device_;
};
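// Executes an op on the parallel device. TPUReplicatedInput packs one
// per-device handle per underlying device into a single parallel tensor, and
// TPUReplicatedOutput unpacks a parallel tensor into its per-device
// components. Any other op runs on every underlying device at once;
// non-parallel inputs are only implicitly broadcast for _EagerConst.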
absl::optional<std::vector<MaybeParallelTensorOwned>> ExecuteWithSpecialOps(
const ParallelDevice& parallel_device,
const std::string& parallel_device_name, TFE_Context* context,
std::vector<MaybeParallelTensorUnowned> inputs, const char* operation_name,
const TFE_OpAttrs* attributes, int expected_max_outputs,
TF_Status* status) {
absl::optional<std::vector<MaybeParallelTensorOwned>> result;
if (operation_name == std::string("TPUReplicatedInput")) {
if (inputs.size() != parallel_device.num_underlying_devices()) {
std::string message(absl::StrCat(
"The parallel device ", parallel_device_name, " expected ",
parallel_device.num_underlying_devices(),
" inputs to TPUReplicatedInput, but got ", inputs.size()));
TF_SetStatus(status, TF_INVALID_ARGUMENT, message.c_str());
return result;
}
std::vector<TensorHandlePtr> components;
components.reserve(inputs.size());
for (int i = 0; i < inputs.size(); ++i) {
if (absl::holds_alternative<ParallelTensor*>(inputs[i])) {
std::string message(absl::StrCat(
"Expected all inputs to TPUReplicatedInput to be non-parallel "
"TensorHandles. The input ",
i,
" was a parallel tensor (already "
"placed on the parallel device)."));
TF_SetStatus(status, TF_INVALID_ARGUMENT, message.c_str());
return result;
}
components.emplace_back(TFE_TensorHandleCopySharingTensor(
absl::get<TFE_TensorHandle*>(inputs[i]), status));
}
std::vector<MaybeParallelTensorOwned> result_content;
result_content.reserve(1);
result_content.push_back(ParallelTensor::FromTensorHandles(
parallel_device, std::move(components), status));
if (TF_GetCode(status) != TF_OK) return result;
result.emplace(std::move(result_content));
return result;
} else if (operation_name == std::string("TPUReplicatedOutput")) {
OpPtr op(TFE_NewOp(context, operation_name, status));
TFE_OpAddAttrs(op.get(), attributes);
int expected_outputs = TFE_OpGetOutputLength(op.get(), "outputs", status);
if (TF_GetCode(status) != TF_OK) return result;
if (expected_outputs != parallel_device.num_underlying_devices()) {
std::string message(absl::StrCat(
"The parallel device ", parallel_device_name, " expected ",
parallel_device.num_underlying_devices(),
" outputs for TPUReplicatedOutput, but got ", expected_outputs));
TF_SetStatus(status, TF_INVALID_ARGUMENT, message.c_str());
return result;
}
if (absl::holds_alternative<TFE_TensorHandle*>(inputs[0])) {
TF_SetStatus(status, TF_INVALID_ARGUMENT,
"Expected the input to "
"TPUReplicatedOutput to be a parallel tensor (placed on the "
"parallel device).");
return result;
}
ParallelTensor* t = absl::get<ParallelTensor*>(inputs[0]);
std::vector<MaybeParallelTensorOwned> outputs;
outputs.reserve(t->num_tensors());
for (int i = 0; i < t->num_tensors(); ++i) {
TensorHandlePtr this_output(
TFE_TensorHandleCopySharingTensor(t->tensor(i), status));
outputs.emplace_back(std::move(this_output));
if (TF_GetCode(status) != TF_OK) return result;
}
result.emplace(std::move(outputs));
return result;
}
std::vector<ParallelTensor*> parallel_inputs;
std::vector<std::unique_ptr<ParallelTensor>> implicitly_broadcast_tensors;
parallel_inputs.reserve(inputs.size());
implicitly_broadcast_tensors.reserve(inputs.size());
for (const auto& input : inputs) {
if (absl::holds_alternative<TFE_TensorHandle*>(input)) {
if (operation_name == std::string("_EagerConst")) {
std::unique_ptr<ParallelTensor> parallel_tensor(
parallel_device.CopyToParallelDevice(
context, absl::get<TFE_TensorHandle*>(input), status));
if (TF_GetCode(status) != TF_OK) return absl::nullopt;
parallel_inputs.push_back(parallel_tensor.get());
implicitly_broadcast_tensors.emplace_back(std::move(parallel_tensor));
} else {
TF_SetStatus(
status, TF_INVALID_ARGUMENT,
absl::StrCat(
"Got a non-parallel tensor ",
tensorflow::unwrap(absl::get<TFE_TensorHandle*>(input))
->DebugString(),
" as input to a parallel operation. First pack non-parallel "
"tensors for each device into a parallel tensor explicitly.")
.c_str());
return absl::nullopt;
}
} else {
parallel_inputs.push_back(absl::get<ParallelTensor*>(input));
}
}
absl::optional<std::vector<std::unique_ptr<ParallelTensor>>>
maybe_parallel_results(
parallel_device.Execute(context, parallel_inputs, operation_name,
attributes, expected_max_outputs, status));
if (!maybe_parallel_results.has_value()) return result;
std::vector<std::unique_ptr<ParallelTensor>> parallel_results(
std::move(maybe_parallel_results.value()));
std::vector<MaybeParallelTensorOwned> result_content;
result_content.reserve(parallel_results.size());
for (std::unique_ptr<ParallelTensor>& parallel_result : parallel_results) {
result_content.push_back(
MaybeParallelTensorOwned(std::move(parallel_result)));
}
result.emplace(std::move(result_content));
return result;
}
void ParallelTensorDeallocator(void* data) {
delete reinterpret_cast<ParallelTensor*>(data);
}
int ParallelTensorNumDims(void* data, TF_Status* status) {
const std::vector<int64_t>* shape;
Status s = reinterpret_cast<ParallelTensor*>(data)->Shape(&shape);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return -1;
}
return shape->size();
}
int64_t ParallelTensorDim(void* data, int dim_index, TF_Status* status) {
const std::vector<int64_t>* shape;
Status s = reinterpret_cast<ParallelTensor*>(data)->Shape(&shape);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return -1;
}
return (*shape)[dim_index];
}
TF_Buffer* ParallelTensorSummarize(void* data, TF_Status* status) {
ParallelTensor* parallel_tensor = reinterpret_cast<ParallelTensor*>(data);
std::string summary;
Status cpp_status = parallel_tensor->SummarizeValue(summary);
if (!cpp_status.ok()) {
tsl::Set_TF_Status_from_Status(status, cpp_status);
return nullptr;
}
return TF_NewBufferFromString(summary.data(), summary.size());
}
TensorHandlePtr ParallelTensorToTensorHandle(
const std::string& parallel_device_name, TFE_Context* context,
std::unique_ptr<ParallelTensor> t, TF_Status* status) {
ParallelTensor* t_released = t.release();
TFE_CustomDeviceTensorHandleMethods handle_methods;
handle_methods.num_dims = &ParallelTensorNumDims;
handle_methods.dim = &ParallelTensorDim;
handle_methods.deallocator = &ParallelTensorDeallocator;
handle_methods.summarize = &ParallelTensorSummarize;
return TensorHandlePtr(TFE_NewCustomDeviceTensorHandle(
context, parallel_device_name.c_str(), t_released->dtype(), t_released,
handle_methods, status));
}
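// Implicit copies onto the parallel device are rejected (TF_UNIMPLEMENTED):
// callers must pack a component for each underlying device explicitly.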
TFE_TensorHandle* CopyToParallelDevice(TFE_Context* context,
TFE_TensorHandle* tensor,
TF_Status* status, void* device_info) {
TF_SetStatus(
status, TF_UNIMPLEMENTED,
absl::StrCat("Trying to copy a tensor ",
tensorflow::unwrap(tensor)->DebugString(),
" on to a parallel device. Pack non-parallel "
"tensors for each device into a parallel tensor explicitly.")
.c_str());
return nullptr;
}
TFE_TensorHandle* CopyTensorFromParallelDevice(TFE_Context* context,
TFE_TensorHandle* tensor,
const char* target_device_name,
TF_Status* status,
void* device_info) {
ParallelTensor* parallel_tensor = reinterpret_cast<ParallelTensor*>(
TFE_TensorHandleDevicePointer(tensor, status));
if (TF_GetCode(status) != TF_OK) return nullptr;
if (parallel_tensor->num_tensors() == 1) {
return TFE_TensorHandleCopySharingTensor(parallel_tensor->tensor(0),
status);
} else {
TF_SetStatus(
status, TF_UNIMPLEMENTED,
absl::StrCat(
"Trying to copy a tensor out of a parallel device. Since there "
"are multiple components to parallel tensors, they must be "
"unpacked explicitly.\n",
tensorflow::unwrap(tensor)->DebugString())
.c_str());
return nullptr;
}
}
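// Entry point for executing an op placed on the parallel device: inputs that
// already live on the device are resolved to ParallelTensors, the op is run
// via ExecuteWithSpecialOps, and the per-device results are wrapped back into
// custom-device tensor handles.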
void ParallelDeviceExecute(const TFE_Op* original_op, int* num_outputs,
TFE_TensorHandle** outputs, TF_Status* status,
void* device_info) {
const char* requested_placement = TFE_OpGetDevice(original_op, status);
if (*requested_placement == '\0') {
TF_SetStatus(
status, TF_INTERNAL,
"Ops must be placed on the parallel device explicitly, or their inputs "
"first un-packed. Got an un-placed op with an input placed on the "
"parallel device.");
return;
}
TFE_Context* context = TFE_OpGetContext(original_op, status);
if (TF_GetCode(status) != TF_OK) return;
const char* operation_name = TFE_OpGetName(original_op, status);
if (TF_GetCode(status) != TF_OK) return;
const TFE_OpAttrs* attributes = TFE_OpGetAttrs(original_op);
NamedParallelDevice* named_device =
reinterpret_cast<NamedParallelDevice*>(device_info);
std::vector<MaybeParallelTensorUnowned> typed_inputs;
int num_inputs = TFE_OpGetFlatInputCount(original_op, status);
if (TF_GetCode(status) != TF_OK) return;
typed_inputs.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
TFE_TensorHandle* input = TFE_OpGetFlatInput(original_op, i, status);
if (TF_GetCode(status) != TF_OK) return;
const char* tensor_handle_device =
TFE_TensorHandleDeviceName(input, status);
if (TF_GetCode(status) != TF_OK) return;
if (named_device->name() == tensor_handle_device) {
typed_inputs.emplace_back(reinterpret_cast<ParallelTensor*>(
TFE_TensorHandleDevicePointer(input, status)));
if (TF_GetCode(status) != TF_OK) return;
} else {
typed_inputs.emplace_back(input);
}
}
absl::optional<std::vector<MaybeParallelTensorOwned>> maybe_typed_outputs(
ExecuteWithSpecialOps(named_device->device(), named_device->name(),
context, std::move(typed_inputs), operation_name,
attributes, *num_outputs, status));
if (TF_GetCode(status) != TF_OK) return;
if (!maybe_typed_outputs.has_value()) {
TF_SetStatus(status, TF_INTERNAL, "OK status but no value was returned.");
return;
}
std::vector<MaybeParallelTensorOwned> typed_outputs(
std::move(maybe_typed_outputs.value()));
if (typed_outputs.size() > *num_outputs) {
TF_SetStatus(status, TF_INTERNAL,
"The allocated output buffer was too small.");
return;
}
for (int i = 0; i < typed_outputs.size(); ++i) {
MaybeParallelTensorOwned typed_output(std::move(typed_outputs[i]));
if (absl::holds_alternative<TensorHandlePtr>(typed_output)) {
outputs[i] = absl::get<TensorHandlePtr>(typed_output).release();
} else {
outputs[i] = ParallelTensorToTensorHandle(
named_device->name(), context,
std::move(absl::get<std::unique_ptr<ParallelTensor>>(
typed_output)),
status)
.release();
if (TF_GetCode(status) != TF_OK) return;
}
}
*num_outputs = typed_outputs.size();
}
void DeleteParallelDevice(void* device_info) {
delete reinterpret_cast<NamedParallelDevice*>(device_info);
}
}
void AllocateParallelDevice(const char* device_name,
const char* const* underlying_devices,
int num_underlying_devices,
TFE_CustomDevice* device, void** device_info) {
device->copy_tensor_to_device = &CopyToParallelDevice;
device->copy_tensor_from_device = &CopyTensorFromParallelDevice;
device->delete_device = &DeleteParallelDevice;
device->execute = &ParallelDeviceExecute;
std::vector<std::string> underlying_devices_vector;
underlying_devices_vector.reserve(num_underlying_devices);
for (int device_index = 0; device_index < num_underlying_devices;
++device_index) {
underlying_devices_vector.push_back(underlying_devices[device_index]);
}
std::unique_ptr<ParallelDevice> parallel_device(
new ParallelDevice(underlying_devices_vector));
*device_info =
new NamedParallelDevice{device_name, std::move(parallel_device)};
}
}
} | #include "tensorflow/c/eager/parallel_device/parallel_device.h"
#include <array>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_experimental.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/eager/parallel_device/parallel_device_testlib.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_status_internal.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace parallel_device {
using ::testing::HasSubstr;
TEST(PARALLEL_DEVICE, TestBasicCPU) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
BasicTestsForTwoDevices(context.get(),
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1");
}
TEST(PARALLEL_DEVICE, TestBasicCPUAliased) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
BasicTestsForTwoDevices(context.get(),
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:0");
}
TEST(PARALLEL_DEVICE, TestBasicTPUAliased) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::unique_ptr<TF_DeviceList, decltype(&TF_DeleteDeviceList)> devices(
TFE_ContextListDevices(context.get(), status.get()), TF_DeleteDeviceList);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
bool has_tpu = false;
for (int device_index = 0; device_index < TF_DeviceListCount(devices.get());
++device_index) {
std::string device_type =
TF_DeviceListType(devices.get(), device_index, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
if (device_type == "TPU") {
has_tpu = true;
break;
}
}
if (has_tpu) {
BasicTestsForTwoDevices(context.get(),
"/job:localhost/replica:0/task:0/device:TPU:0",
"/job:localhost/replica:0/task:0/device:TPU:0");
}
}
TEST(PARALLEL_DEVICE, TestExplicitCopies) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
const char* first_device_name =
"/job:localhost/replica:0/task:0/device:CPU:0";
const char* second_device_name =
"/job:localhost/replica:0/task:0/device:CPU:1";
std::array<const char*, 2> underlying_devices{first_device_name,
second_device_name};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr cpu_value(FloatTensorHandle(3., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr failed_copy_on_result(TFE_TensorHandleCopyToDevice(
cpu_value.get(), context.get(), device_name, status.get()));
EXPECT_EQ(TF_GetCode(status.get()), TF_UNIMPLEMENTED);
std::array<TFE_TensorHandle*, 2> components{cpu_value.get(), cpu_value.get()};
TensorHandlePtr device_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr copy_off(TFE_TensorHandleCopyToDevice(
device_value.get(), context.get(), first_device_name, status.get()));
EXPECT_EQ(TF_GetCode(status.get()), TF_UNIMPLEMENTED);
}
TEST(PARALLEL_DEVICE, TestDifferentShapes) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 2),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::vector<float> size_two_value{1., 2.};
std::vector<float> size_three_value{1., 2., 3.};
TensorHandlePtr size_two(
VectorFloatTensorHandle(size_two_value, status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr size_three(
VectorFloatTensorHandle(size_three_value, status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{size_two.get(), size_three.get()};
TensorHandlePtr combined_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
int num_axes = TFE_TensorHandleNumDims(combined_value.get(), status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
EXPECT_EQ(num_axes, 1);
}
TEST(PARALLEL_DEVICE, TestNestedParallelDevices) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
TF_CreateConfig(
false,
true, 3),
TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* first_device_name =
"/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> first_underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), first_device_name,
first_underlying_devices, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* second_device_name =
"/job:localhost/replica:0/task:0/device:CUSTOM:1";
std::array<const char*, 2> second_underlying_devices{
"/job:localhost/replica:0/task:0/device:CUSTOM:0",
"/job:localhost/replica:0/task:0/device:CPU:2"};
RegisterParallelDevice(context.get(), second_device_name,
second_underlying_devices, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_one(FloatTensorHandle(1., status.get()));
TensorHandlePtr value_two(FloatTensorHandle(2., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()};
TensorHandlePtr first_combined_value = CreatePerDeviceValues(
context.get(), components, first_device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_three(FloatTensorHandle(3., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
components[0] = first_combined_value.get();
components[1] = value_three.get();
TensorHandlePtr second_combined_value = CreatePerDeviceValues(
context.get(), components, second_device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr negative_one_cpu(FloatTensorHandle(3., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
components[0] = negative_one_cpu.get();
components[1] = negative_one_cpu.get();
TensorHandlePtr first_negative_one = CreatePerDeviceValues(
context.get(), components, first_device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
components[0] = first_negative_one.get();
components[1] = negative_one_cpu.get();
TensorHandlePtr second_negative_one = CreatePerDeviceValues(
context.get(), components, second_device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr multiply_result(
Multiply(context.get(), second_combined_value.get(),
second_negative_one.get(), status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TensorHandlePtr, 2> second_components;
ExtractPerDeviceValues(context.get(), multiply_result.get(),
&second_components, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<float>(second_components[1].get(), 9.);
std::string first_device = TFE_TensorHandleBackingDeviceName(
second_components[0].get(), status.get());
ASSERT_EQ(second_underlying_devices[0], first_device);
std::string second_device = TFE_TensorHandleBackingDeviceName(
second_components[1].get(), status.get());
ASSERT_EQ(second_underlying_devices[1], second_device);
std::array<TensorHandlePtr, 2> first_components;
ExtractPerDeviceValues(context.get(), second_components[0].get(),
&first_components, status.get());
ExpectScalarEq<float>(first_components[0].get(), 3.);
ExpectScalarEq<float>(first_components[1].get(), 6.);
first_device = TFE_TensorHandleBackingDeviceName(first_components[0].get(),
status.get());
ASSERT_EQ(first_underlying_devices[0], first_device);
second_device = TFE_TensorHandleBackingDeviceName(first_components[1].get(),
status.get());
ASSERT_EQ(first_underlying_devices[1], second_device);
}
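// Packing the wrong number of components, unpacking to the wrong arity,
// re-packing an already-parallel handle, and running TPUReplicatedOutput on a
// parallel handle should all fail with TF_INVALID_ARGUMENT.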
TEST(PARALLEL_DEVICE, TestInvalidPacking) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 1> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_one(FloatTensorHandle(1., status.get()));
TensorHandlePtr value_two(FloatTensorHandle(2., status.get()));
{
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{value_one.get(),
value_two.get()};
TensorHandlePtr combined_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT)
<< TF_Message(status.get());
}
{
std::array<TFE_TensorHandle*, 1> correct_components{value_one.get()};
TensorHandlePtr combined_value = CreatePerDeviceValues(
context.get(), correct_components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TensorHandlePtr, 2> incorrect_components;
ExtractPerDeviceValues(context.get(), combined_value.get(),
&incorrect_components, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT)
<< TF_Message(status.get());
}
{
std::array<TFE_TensorHandle*, 1> correct_components{value_one.get()};
TensorHandlePtr combined_value = CreatePerDeviceValues(
context.get(), correct_components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 1> incorrect_components{combined_value.get()};
TensorHandlePtr recombined_value = CreatePerDeviceValues(
context.get(), incorrect_components, device_name, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT)
<< TF_Message(status.get());
}
{
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
TFE_NewOp(context.get(), "TPUReplicatedOutput", status.get()),
TFE_DeleteOp);
if (TF_GetCode(status.get()) != TF_OK) return;
TFE_OpSetAttrInt(op.get(), "num_replicas", 1);
TFE_OpAddInput(op.get(), value_one.get(), status.get());
if (TF_GetCode(status.get()) != TF_OK) return;
TFE_OpSetDevice(op.get(), device_name, status.get());
if (TF_GetCode(status.get()) != TF_OK) return;
TFE_TensorHandle* result_handles;
int num_retvals = 1;
TFE_Execute(op.get(), &result_handles, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT)
<< TF_Message(status.get());
}
}
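// Helper running a CollectiveReduce (merge_op="Add", final_op="Id") over
// `input` on its own device, returning the reduced handle.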
TensorHandlePtr CollectiveSum(TFE_Context* context, TFE_TensorHandle* input,
int group_size, TF_Status* status) {
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
TFE_NewOp(context, "CollectiveReduce", status), TFE_DeleteOp);
if (TF_GetCode(status) != TF_OK) return nullptr;
const char* device = TFE_TensorHandleDeviceName(input, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
TFE_OpSetDevice(op.get(), device, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
TFE_OpSetAttrType(op.get(), "T", TFE_TensorHandleDataType(input));
TFE_OpSetAttrInt(op.get(), "group_size", group_size);
TFE_OpSetAttrInt(op.get(), "group_key", 0);
TFE_OpSetAttrInt(op.get(), "instance_key", 0);
const std::string merge_op("Add");
TFE_OpSetAttrString(op.get(), "merge_op", merge_op.c_str(),
merge_op.length());
const std::string final_op("Id");
TFE_OpSetAttrString(op.get(), "final_op", final_op.c_str(),
final_op.length());
TFE_OpSetAttrIntList(op.get(), "subdiv_offsets", nullptr, 0);
TFE_OpAddInput(op.get(), input, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
TFE_TensorHandle* result_handle;
int num_retvals = 1;
TFE_Execute(op.get(), &result_handle, &num_retvals, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
return TensorHandlePtr(result_handle);
}
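// Sums (1., 2.) across a two-CPU parallel device; every component of the
// result should hold 3. Runs with a synchronous or asynchronous eager
// executor depending on `async`.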
void TestCollective(bool async) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
TFE_ContextOptionsSetAsync(opts.get(), async);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true, /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_one(FloatTensorHandle(1., status.get()));
TensorHandlePtr value_two(FloatTensorHandle(2., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()};
TensorHandlePtr parallel_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr reduced(
CollectiveSum(context.get(), parallel_value.get(), 2, status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TensorHandlePtr, 2> result_components;
ExtractPerDeviceValues(context.get(), reduced.get(), &result_components,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<float>(result_components[0].get(), 3.);
ExpectScalarEq<float>(result_components[1].get(), 3.);
}
TEST(PARALLEL_DEVICE, TestCollectiveSync) { TestCollective(false); }
TEST(PARALLEL_DEVICE, TestCollectiveAsync) { TestCollective(true); }
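// Builds a graph function mapping a float placeholder through a
// CollectiveReduce with merge_op="Mul", then registers it with the eager
// context under `function_name`.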
void RegisterCollectiveMulFunction(TFE_Context* context,
const char* function_name, int group_size,
TF_Status* status) {
std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> body(TF_NewGraph(),
TF_DeleteGraph);
TF_OperationDescription* placeholder_desc =
TF_NewOperation(body.get(), "Placeholder", "Placeholder");
TF_SetAttrType(placeholder_desc, "dtype", TF_FLOAT);
TF_Operation* placeholder_op = TF_FinishOperation(placeholder_desc, status);
if (TF_GetCode(status) != TF_OK) return;
TF_Output x{placeholder_op, 0};
TF_OperationDescription* reduce_desc =
TF_NewOperation(body.get(), "CollectiveReduce", "CollectiveReduce");
TF_SetAttrType(reduce_desc, "T", TF_FLOAT);
TF_SetAttrInt(reduce_desc, "group_size", group_size);
TF_SetAttrInt(reduce_desc, "group_key", 0);
TF_SetAttrInt(reduce_desc, "instance_key", 0);
const std::string merge_op("Mul");
TF_SetAttrString(reduce_desc, "merge_op", merge_op.c_str(),
merge_op.length());
const std::string final_op("Id");
TF_SetAttrString(reduce_desc, "final_op", final_op.c_str(),
final_op.length());
TF_SetAttrIntList(reduce_desc, "subdiv_offsets", nullptr, 0);
TF_AddInput(reduce_desc, x);
TF_Operation* reduce_op = TF_FinishOperation(reduce_desc, status);
if (TF_GetCode(status) != TF_OK) return;
TF_Operation* operations[]{placeholder_op, reduce_op};
TF_Output y{reduce_op, 0};
const char* output_name = "y";
std::unique_ptr<TF_Function, decltype(&TF_DeleteFunction)> function(
      TF_GraphToFunction(
          body.get(), function_name,
          /*append_hash_to_fn_name=*/0, /*num_opers=*/2,
          /*opers=*/operations, /*ninputs=*/1, /*inputs=*/&x,
          /*noutputs=*/1, /*outputs=*/&y, /*output_names=*/&output_name,
          /*opts=*/nullptr, /*description=*/"", status),
TF_DeleteFunction);
if (TF_GetCode(status) != TF_OK) return;
TFE_ContextAddFunction(context, function.get(), status);
}
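// Calls the registered collective-mul function on a two-CPU parallel device
// holding (7., 9.); each component should receive the full product 63.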
TEST(PARALLEL_DEVICE, TestFunction) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true, /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* function_name = "test_reduce_mul";
RegisterCollectiveMulFunction(context.get(), function_name, 2, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_one(FloatTensorHandle(7., status.get()));
TensorHandlePtr value_two(FloatTensorHandle(9., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()};
TensorHandlePtr parallel_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
TFE_NewOp(context.get(), function_name, status.get()), TFE_DeleteOp);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_OpSetDevice(op.get(), device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_OpAddInput(op.get(), parallel_value.get(), status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_TensorHandle* raw_result_handle;
int num_retvals = 1;
TFE_Execute(op.get(), &raw_result_handle, &num_retvals, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr reduced(raw_result_handle);
std::array<TensorHandlePtr, 2> result_components;
ExtractPerDeviceValues(context.get(), reduced.get(), &result_components,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<float>(result_components[0].get(), 7. * 9.);
ExpectScalarEq<float>(result_components[1].get(), 7. * 9.);
std::string first_device = TFE_TensorHandleBackingDeviceName(
result_components[0].get(), status.get());
ASSERT_EQ(underlying_devices[0], first_device);
std::string second_device = TFE_TensorHandleBackingDeviceName(
result_components[1].get(), status.get());
ASSERT_EQ(underlying_devices[1], second_device);
}
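// Summarizing a parallel tensor through the unwrapped C++ handle should list
// each component's device and value, e.g. "CPU:0": 3.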
TEST(PARALLEL_DEVICE, TestSummaryString) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true, /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr cpu_value(FloatTensorHandle(3., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{cpu_value.get(), cpu_value.get()};
TensorHandlePtr device_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ImmediateExecutionTensorHandle* unwrapped_handle =
tensorflow::unwrap(device_value.get());
std::string summarized;
TF_ASSERT_OK(unwrapped_handle->SummarizeValue(summarized));
EXPECT_THAT(summarized, HasSubstr("\"CPU:0\": 3"));
}
}
} |
1,273 | cpp | tensorflow/tensorflow | parallel_device_lib | tensorflow/c/eager/parallel_device/parallel_device_lib.cc | tensorflow/c/eager/parallel_device/parallel_device_lib_test.cc | #ifndef TENSORFLOW_C_EAGER_PARALLEL_DEVICE_PARALLEL_DEVICE_LIB_H_
#define TENSORFLOW_C_EAGER_PARALLEL_DEVICE_PARALLEL_DEVICE_LIB_H_
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/tfe_op_internal.h"
#include "tensorflow/c/safe_ptr.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
namespace tensorflow {
namespace parallel_device {
using TensorHandlePtr = tensorflow::Safe_TFE_TensorHandlePtr;
class ParallelTensor;
class DeviceThread;
class ParallelDevice {
public:
explicit ParallelDevice(const std::vector<std::string>& devices,
bool is_async = false, int in_flight_nodes_limit = 0);
~ParallelDevice();
std::unique_ptr<ParallelTensor> CopyToParallelDevice(TFE_Context* context,
TFE_TensorHandle* tensor,
TF_Status* status) const;
template <typename DataType>
std::unique_ptr<ParallelTensor> ScalarsFromSequence(
absl::Span<const DataType> values, TFE_Context* context,
TF_Status* status) const;
std::unique_ptr<ParallelTensor> DeviceIDs(TFE_Context* context,
TF_Status* status) const;
size_t num_underlying_devices() const { return underlying_devices_.size(); }
const std::vector<std::string>& underlying_devices() const {
return underlying_devices_;
}
absl::optional<std::vector<std::unique_ptr<ParallelTensor>>> Execute(
TFE_Context* context, const std::vector<ParallelTensor*>& inputs,
const char* operation_name, const TFE_OpAttrs* attributes,
int expected_max_outputs, TF_Status* status) const;
void StartExecute(TFE_Context* context,
const std::vector<ParallelTensor*>& inputs,
const char* operation_name, const TFE_OpAttrs* attributes,
int expected_max_outputs,
CancellationManager& cancellation_manager,
std::optional<int64_t> step_id = std::nullopt) const;
void StartExecute(TFE_Context* context,
const std::vector<std::vector<TFE_TensorHandle*>>& inputs,
const char* operation_name, const TFE_OpAttrs* attributes,
int expected_max_outputs,
CancellationManager& cancellation_manager,
std::optional<int64_t> step_id = std::nullopt) const;
absl::optional<std::vector<std::unique_ptr<ParallelTensor>>> Join(
const std::vector<PartialTensorShape>& expected_output_shapes,
TF_Status* status) const;
void AsyncWait(TFE_Context* context, TF_Status* status) const;
std::vector<std::string> SummarizeDeviceNames() const;
private:
const std::vector<std::string> underlying_devices_;
std::vector<std::unique_ptr<DeviceThread>> device_threads_;
mutable std::unique_ptr<CancellationManager> default_cancellation_manager_;
};
class ParallelTensor {
public:
static std::unique_ptr<ParallelTensor> FromTensorHandles(
const ParallelDevice& parallel_device,
std::vector<TensorHandlePtr> components, TF_Status* status);
static std::unique_ptr<ParallelTensor> FromTensorHandles(
const ParallelDevice& parallel_device,
std::vector<TensorHandlePtr> components, absl::Span<const int64_t> shape,
TF_Status* status);
size_t num_tensors() const { return tensors_.size(); }
TFE_TensorHandle* tensor(size_t index) const { return tensors_[index].get(); }
Status Shape(const std::vector<int64_t>** shape) const;
TF_DataType dtype() const { return dtype_; }
Status SummarizeValue(std::string& summary);
std::vector<TensorHandlePtr> release_tensors() { return std::move(tensors_); }
std::vector<TFE_TensorHandle*> tensors() const {
std::vector<TFE_TensorHandle*> result;
result.reserve(tensors_.size());
for (const TensorHandlePtr& tensor : tensors_) {
result.emplace_back(tensor.get());
}
return result;
}
private:
ParallelTensor(const ParallelDevice& device,
std::vector<TensorHandlePtr> tensors,
absl::Span<const int64_t> shape, const TF_DataType dtype)
: device_(device),
tensors_(std::move(tensors)),
shape_(std::vector<int64_t>(shape.begin(), shape.end())),
dtype_(dtype) {}
ParallelTensor(const ParallelDevice& device,
std::vector<TensorHandlePtr> tensors, const TF_DataType dtype)
: device_(device),
tensors_(std::move(tensors)),
shape_(absl::nullopt),
dtype_(dtype) {}
const ParallelDevice& device_;
std::vector<TensorHandlePtr> tensors_;
mutable absl::optional<std::vector<int64_t>> shape_;
const TF_DataType dtype_;
};
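// Example (sketch): ScalarsFromSequence<float>({1.0f, 2.0f}, context, status)
// returns a ParallelTensor whose i-th component is a scalar constant placed on
// underlying device i; the unit tests below exercise exactly this.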
template <typename DataType>
std::unique_ptr<ParallelTensor> ParallelDevice::ScalarsFromSequence(
absl::Span<DataType const> values, TFE_Context* context,
TF_Status* status) const {
std::vector<TensorHandlePtr> components;
components.reserve(underlying_devices_.size());
if (values.size() != num_underlying_devices()) {
TF_SetStatus(
status, TF_INVALID_ARGUMENT,
"Number of values did not match number of underlying devices.");
return nullptr;
}
TF_DataType datatype_enum(
static_cast<TF_DataType>(DataTypeToEnum<DataType>().value));
for (int device_index = 0; device_index < num_underlying_devices();
++device_index) {
auto device_value = absl::make_unique<DataType>();
*device_value = values[device_index];
std::unique_ptr<TF_Tensor, decltype(&TF_DeleteTensor)> tensor(
TF_NewTensor(
            datatype_enum, /*dims=*/nullptr, /*num_dims=*/0,
            device_value.release(), sizeof(DataType),
            [](void* data, size_t, void* arg) {
              delete reinterpret_cast<DataType*>(data);
            },
            /*deallocator_arg=*/nullptr),
TF_DeleteTensor);
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> const_op(
TFE_NewOp(context, "Const", status), TFE_DeleteOp);
if (TF_GetCode(status) != TF_OK) return nullptr;
TFE_OpSetDevice(const_op.get(), underlying_devices_[device_index].c_str(),
status);
if (TF_GetCode(status) != TF_OK) return nullptr;
TFE_OpSetAttrTensor(const_op.get(), "value", tensor.get(), status);
if (TF_GetCode(status) != TF_OK) return nullptr;
TFE_OpSetAttrType(const_op.get(), "dtype", datatype_enum);
TFE_TensorHandle* device_handle;
int num_outputs = 1;
TFE_Execute(const_op.get(), &device_handle, &num_outputs, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
components.emplace_back(device_handle);
}
return ParallelTensor::FromTensorHandles(*this, std::move(components),
status);
}
}
}
#endif
#include "tensorflow/c/eager/parallel_device/parallel_device_lib.h"
#include <string>
#include <utility>
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/tfe_cancellation_manager_internal.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_internal.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace parallel_device {
namespace {
class OpDeleter {
public:
void operator()(TFE_Op* to_delete) const { TFE_DeleteOp(to_delete); }
};
using OpPtr = std::unique_ptr<TFE_Op, OpDeleter>;
class StatusDeleter {
public:
void operator()(TF_Status* to_delete) const { TF_DeleteStatus(to_delete); }
};
using StatusPtr = std::unique_ptr<TF_Status, StatusDeleter>;
class ExecutorDeleter {
public:
void operator()(TFE_Executor* to_delete) const {
TFE_DeleteExecutor(to_delete);
}
};
using ExecutorPtr = std::unique_ptr<TFE_Executor, ExecutorDeleter>;
}
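// Owns a worker thread bound to one underlying device; ParallelDevice hands
// each per-device op to a DeviceThread so ops run concurrently across devices.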
class DeviceThread {
public:
explicit DeviceThread(const std::string& device, const bool is_async,
const int in_flight_nodes_limit)
: status_(TF_NewStatus()),
device_(device),
        executor_(TFE_NewExecutor(is_async, /*enable_streaming_enqueue=*/true,
                                  in_flight_nodes_limit)),
op_(nullptr),
thread_(tensorflow::Env::Default()->StartThread(
tensorflow::ThreadOptions(), "parallel_device_execute",
std::bind(&DeviceThread::Run, this))) {}
~DeviceThread();
void StartExecute(TFE_Context* context, const char* operation_name,
std::vector<TFE_TensorHandle*> inputs,
const TFE_OpAttrs* attributes, int expected_max_outputs,
CancellationManager& cancellation_manager,
absl::optional<int64_t> step_id = absl::nullopt);
std::vector<TensorHandlePtr> Join(TF_Status* status);
void AsyncWait(TF_Status* status);
private:
void Run();
void Execute(TFE_Context* context, const char* operation_name,
std::vector<TFE_TensorHandle*> inputs,
const TFE_OpAttrs* attributes, int expected_max_outputs,
std::vector<TensorHandlePtr>* outputs, TF_Status* status) const
TF_EXCLUSIVE_LOCKS_REQUIRED(execution_mutex_);
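  // Worker-thread handshake: StartExecute() moves kIdle -> kReadyToExecute,
  // Run() executes the op and moves to kHasResult, and Join() collects the
  // outputs and returns to kIdle. kShuttingDown tells Run() to exit.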
enum class ExecutionState {
kReadyToExecute,
kHasResult,
kIdle,
kShuttingDown,
};
tensorflow::mutex execution_mutex_;
ExecutionState execution_state_ TF_GUARDED_BY(execution_mutex_) =
ExecutionState::kIdle;
tensorflow::condition_variable start_execute_;
tensorflow::condition_variable finished_execute_;
tensorflow::condition_variable finished_join_;
TFE_Context* context_ TF_GUARDED_BY(execution_mutex_);
const char* operation_name_ TF_GUARDED_BY(execution_mutex_);
absl::optional<int64_t> step_id_ TF_GUARDED_BY(execution_mutex_) =
absl::nullopt;
std::vector<TFE_TensorHandle*> op_inputs_ TF_GUARDED_BY(execution_mutex_);
const TFE_OpAttrs* attributes_ TF_GUARDED_BY(execution_mutex_);
int expected_max_outputs_ TF_GUARDED_BY(execution_mutex_);
CancellationManager* cancellation_manager_ TF_GUARDED_BY(execution_mutex_);
std::vector<TensorHandlePtr> op_outputs_ TF_GUARDED_BY(execution_mutex_);
StatusPtr status_ TF_GUARDED_BY(execution_mutex_);
const std::string device_;
ExecutorPtr executor_ TF_GUARDED_BY(execution_mutex_);
mutable OpPtr op_ TF_GUARDED_BY(execution_mutex_);
std::unique_ptr<Thread> thread_;
};
DeviceThread::~DeviceThread() {
{
tensorflow::mutex_lock l(execution_mutex_);
execution_state_ = ExecutionState::kShuttingDown;
}
start_execute_.notify_one();
}
void DeviceThread::AsyncWait(TF_Status* status) {
tensorflow::mutex_lock l(execution_mutex_);
TFE_ExecutorWaitForAllPendingNodes(executor_.get(), status);
TFE_ExecutorClearError(executor_.get());
}
void DeviceThread::Run() {
while (true) {
{
tensorflow::mutex_lock l(execution_mutex_);
while (execution_state_ == ExecutionState::kIdle ||
execution_state_ == ExecutionState::kHasResult) {
start_execute_.wait(l);
}
if (execution_state_ == ExecutionState::kShuttingDown) {
return;
} else if (execution_state_ == ExecutionState::kReadyToExecute) {
op_outputs_ = std::vector<TensorHandlePtr>();
Execute(context_, operation_name_, std::move(op_inputs_), attributes_,
expected_max_outputs_, &op_outputs_, status_.get());
execution_state_ = ExecutionState::kHasResult;
}
}
finished_execute_.notify_one();
}
}
void DeviceThread::StartExecute(TFE_Context* context,
const char* operation_name,
std::vector<TFE_TensorHandle*> inputs,
const TFE_OpAttrs* attributes,
int expected_max_outputs,
CancellationManager& cancellation_manager,
absl::optional<int64_t> step_id) {
{
tensorflow::mutex_lock l(execution_mutex_);
while (execution_state_ != ExecutionState::kIdle) {
finished_join_.wait(l);
}
context_ = context;
operation_name_ = operation_name;
step_id_ = step_id;
op_inputs_ = inputs;
attributes_ = attributes;
expected_max_outputs_ = expected_max_outputs;
cancellation_manager_ = &cancellation_manager;
execution_state_ = ExecutionState::kReadyToExecute;
}
start_execute_.notify_one();
}
std::vector<TensorHandlePtr> DeviceThread::Join(TF_Status* status) {
std::vector<TensorHandlePtr> result;
{
tensorflow::mutex_lock l(execution_mutex_);
while (execution_state_ != ExecutionState::kHasResult) {
finished_execute_.wait(l);
}
if (TF_GetCode(status_.get()) != TF_OK) {
TF_SetStatus(status, TF_GetCode(status_.get()),
TF_Message(status_.get()));
TF_SetStatus(status_.get(), TF_OK, "");
}
cancellation_manager_ = nullptr;
execution_state_ = ExecutionState::kIdle;
result = std::move(op_outputs_);
}
finished_join_.notify_one();
return result;
}
void DeviceThread::Execute(TFE_Context* context, const char* operation_name,
std::vector<TFE_TensorHandle*> inputs,
const TFE_OpAttrs* attributes,
int expected_max_outputs,
std::vector<TensorHandlePtr>* outputs,
TF_Status* status) const {
if (op_ == nullptr) {
TFE_ContextSetExecutorForThread(context, executor_.get());
op_.reset(TFE_NewOp(context, operation_name, status));
if (TF_GetCode(status) != TF_OK) return;
TFE_OpSetDevice(op_.get(), device_.c_str(), status);
if (TF_GetCode(status) != TF_OK) return;
} else {
TFE_OpReset(op_.get(), operation_name, device_.c_str(), status);
if (TF_GetCode(status) != TF_OK) return;
}
TFE_OpAddAttrs(op_.get(), attributes);
for (int input_index = 0; input_index < inputs.size(); ++input_index) {
TFE_OpAddInput(op_.get(), inputs[input_index], status);
if (TF_GetCode(status) != TF_OK) return;
}
std::vector<TFE_TensorHandle*> unwrapped_results(expected_max_outputs);
int real_num_outputs = expected_max_outputs;
TFE_OpSetCancellationManager(op_.get(), wrap(cancellation_manager_), status);
if (TF_GetCode(status) != TF_OK) return;
if (step_id_.has_value()) {
tensorflow::unwrap(op_.get())->SetStepId(step_id_.value());
}
TFE_Execute(op_.get(), unwrapped_results.data(), &real_num_outputs, status);
if (TF_GetCode(status) != TF_OK) {
cancellation_manager_->StartCancel();
return;
}
unwrapped_results.resize(real_num_outputs);
outputs->reserve(real_num_outputs);
for (TFE_TensorHandle* unwrapped_result : unwrapped_results) {
outputs->emplace_back(unwrapped_result);
}
}
ParallelDevice::ParallelDevice(const std::vector<std::string>& devices,
bool is_async, int in_flight_nodes_limit)
: underlying_devices_(devices),
default_cancellation_manager_(absl::make_unique<CancellationManager>()) {
device_threads_.reserve(devices.size());
for (int device_index = 0; device_index < devices.size(); ++device_index) {
device_threads_.emplace_back(new DeviceThread(
devices[device_index].c_str(), is_async, in_flight_nodes_limit));
}
}
ParallelDevice::~ParallelDevice() = default;
std::unique_ptr<ParallelTensor> ParallelDevice::CopyToParallelDevice(
TFE_Context* context, TFE_TensorHandle* tensor, TF_Status* status) const {
std::vector<TensorHandlePtr> components;
components.reserve(underlying_devices_.size());
for (const std::string& underlying_device_name : underlying_devices_) {
TFE_TensorHandle* t = TFE_TensorHandleCopyToDevice(
tensor, context, underlying_device_name.c_str(), status);
if (TF_GetCode(status) != TF_OK) return nullptr;
components.emplace_back(t);
}
return ParallelTensor::FromTensorHandles(*this, std::move(components),
status);
}
std::unique_ptr<ParallelTensor> ParallelDevice::DeviceIDs(
TFE_Context* context, TF_Status* status) const {
std::vector<int32_t> ids;
ids.reserve(num_underlying_devices());
for (int i = 0; i < num_underlying_devices(); ++i) {
ids.push_back(i);
}
return ScalarsFromSequence<int32_t>(ids, context, status);
}
absl::optional<std::vector<std::unique_ptr<ParallelTensor>>>
ParallelDevice::Execute(TFE_Context* context,
const std::vector<ParallelTensor*>& inputs,
const char* operation_name,
const TFE_OpAttrs* attributes, int expected_max_outputs,
TF_Status* status) const {
std::vector<PartialTensorShape> expected_output_shapes(expected_max_outputs);
StartExecute(context, inputs, operation_name, attributes,
expected_max_outputs, *default_cancellation_manager_);
auto result = Join(expected_output_shapes, status);
if (TF_GetCode(status) != TF_OK) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> await_status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextAsyncWait(context, await_status.get());
default_cancellation_manager_ = absl::make_unique<CancellationManager>();
}
return result;
}
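// Fans the op out to every device thread, feeding each thread the component
// of each input tensor that lives at that thread's device index.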
void ParallelDevice::StartExecute(TFE_Context* context,
const std::vector<ParallelTensor*>& inputs,
const char* operation_name,
const TFE_OpAttrs* attributes,
int expected_max_outputs,
CancellationManager& cancellation_manager,
absl::optional<int64_t> step_id) const {
for (int device_index = 0; device_index < underlying_devices_.size();
++device_index) {
DeviceThread* device_thread = device_threads_[device_index].get();
std::vector<TFE_TensorHandle*> device_inputs;
device_inputs.reserve(inputs.size());
for (int input_index = 0; input_index < inputs.size(); ++input_index) {
device_inputs.push_back(inputs[input_index]->tensor(device_index));
}
device_thread->StartExecute(
context, operation_name, std::move(device_inputs), attributes,
expected_max_outputs, cancellation_manager, step_id);
}
}
void ParallelDevice::StartExecute(
TFE_Context* context,
const std::vector<std::vector<TFE_TensorHandle*>>& inputs,
const char* operation_name, const TFE_OpAttrs* attributes,
int expected_max_outputs, CancellationManager& cancellation_manager,
absl::optional<int64_t> step_id) const {
for (int device_index = 0; device_index < underlying_devices_.size();
++device_index) {
DeviceThread* device_thread = device_threads_[device_index].get();
std::vector<TFE_TensorHandle*> device_inputs;
device_inputs.reserve(inputs.size());
for (int input_index = 0; input_index < inputs.size(); ++input_index) {
device_inputs.push_back(inputs[input_index][device_index]);
}
device_thread->StartExecute(
context, operation_name, std::move(device_inputs), attributes,
expected_max_outputs, cancellation_manager, step_id);
}
}
void ParallelDevice::AsyncWait(TFE_Context* context, TF_Status* status) const {
StatusPtr first_bad_status(nullptr);
for (const auto& dt : device_threads_) {
StatusPtr async_wait_status(TF_NewStatus());
dt->AsyncWait(async_wait_status.get());
if (TF_GetCode(async_wait_status.get()) != TF_OK &&
(first_bad_status == nullptr ||
TF_GetCode(first_bad_status.get()) == TF_CANCELLED)) {
first_bad_status.reset(TF_NewStatus());
TF_SetStatus(first_bad_status.get(), TF_GetCode(async_wait_status.get()),
TF_Message(async_wait_status.get()));
}
}
if (first_bad_status != nullptr) {
TF_SetStatus(status, TF_GetCode(first_bad_status.get()),
TF_Message(first_bad_status.get()));
}
}
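// Collects per-thread results, surfacing the first non-cancellation error and
// requiring every thread to produce the same number of outputs.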
absl::optional<std::vector<std::unique_ptr<ParallelTensor>>>
ParallelDevice::Join(
const std::vector<PartialTensorShape>& expected_output_shapes,
TF_Status* status) const {
absl::optional<std::vector<std::unique_ptr<ParallelTensor>>> result;
std::vector<std::vector<TensorHandlePtr>> per_device_output_tensors;
per_device_output_tensors.reserve(underlying_devices_.size());
int first_op_output_count = 0;
StatusPtr first_bad_status(nullptr);
for (int device_index = 0; device_index < underlying_devices_.size();
++device_index) {
DeviceThread* device_thread = device_threads_[device_index].get();
per_device_output_tensors.push_back(device_thread->Join(status));
if (TF_GetCode(status) != TF_OK &&
(first_bad_status == nullptr
|| TF_GetCode(first_bad_status.get()) == TF_CANCELLED)) {
first_bad_status.reset(TF_NewStatus());
TF_SetStatus(first_bad_status.get(), TF_GetCode(status),
TF_Message(status));
}
if (device_index == 0) {
first_op_output_count = per_device_output_tensors.rbegin()->size();
} else {
if (first_bad_status == nullptr &&
per_device_output_tensors.rbegin()->size() != first_op_output_count) {
first_bad_status.reset(TF_NewStatus());
TF_SetStatus(first_bad_status.get(), TF_INTERNAL,
"Parallel ops produced different numbers of tensors.");
}
}
}
if (first_bad_status != nullptr) {
TF_SetStatus(status, TF_GetCode(first_bad_status.get()),
TF_Message(first_bad_status.get()));
return result;
}
std::vector<std::unique_ptr<ParallelTensor>> per_device_outputs;
per_device_outputs.reserve(first_op_output_count);
for (int i = 0; i < first_op_output_count; ++i) {
std::vector<TensorHandlePtr> components;
components.reserve(underlying_devices_.size());
for (int j = 0; j < underlying_devices_.size(); ++j) {
components.push_back(std::move(per_device_output_tensors[j][i]));
}
if (expected_output_shapes[i].IsFullyDefined()) {
per_devic | #include "tensorflow/c/eager/parallel_device/parallel_device_lib.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_experimental.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/parallel_device/parallel_device_testlib.h"
#include "tensorflow/c/eager/tfe_context_internal.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace parallel_device {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
TEST(PARALLEL_DEVICE_LIB, TestOpWithError) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true, /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
ParallelDevice parallel_device(std::move(devices));
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> handle_op(
TFE_NewOp(context.get(), "VarHandleOp", status.get()), TFE_DeleteOp);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_OpSetAttrType(handle_op.get(), "dtype", TF_FLOAT);
  TFE_OpSetAttrShape(handle_op.get(), "shape", /*dims=*/nullptr,
                     /*num_dims=*/0, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
auto outputs =
parallel_device.Execute(context.get(), std::vector<ParallelTensor*>(),
"VarHandleOp", TFE_OpGetAttrs(handle_op.get()),
                              /*expected_max_outputs=*/1, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
const std::vector<std::unique_ptr<ParallelTensor>>& handles = *outputs;
std::vector<ParallelTensor*> handle_inputs;
handle_inputs.reserve(handles.size());
for (auto& handle : handles) {
handle_inputs.push_back(handle.get());
}
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> read_op(
TFE_NewOp(context.get(), "ReadVariableOp", status.get()), TFE_DeleteOp);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_OpSetAttrType(read_op.get(), "dtype", TF_FLOAT);
parallel_device.Execute(context.get(), handle_inputs, "ReadVariableOp",
TFE_OpGetAttrs(read_op.get()),
                          /*expected_max_outputs=*/1, status.get());
ASSERT_FALSE(TF_GetCode(status.get()) == TF_OK);
TF_SetStatus(status.get(), TF_OK, "");
parallel_device.Execute(context.get(), std::vector<ParallelTensor*>(),
"VarHandleOp", TFE_OpGetAttrs(handle_op.get()),
                          /*expected_max_outputs=*/1, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
}
TEST(PARALLEL_DEVICE_LIB, TestExplicitOutputShape) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true, /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
ParallelDevice parallel_device(std::move(devices));
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> handle_op(
TFE_NewOp(context.get(), "VarHandleOp", status.get()), TFE_DeleteOp);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_OpSetAttrType(handle_op.get(), "dtype", TF_FLOAT);
  TFE_OpSetAttrShape(handle_op.get(), "shape", /*dims=*/nullptr,
                     /*num_dims=*/0, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
CancellationManager cancellation_manager;
parallel_device.StartExecute(context.get(), std::vector<ParallelTensor*>(),
"VarHandleOp", TFE_OpGetAttrs(handle_op.get()),
                               /*expected_max_outputs=*/1,
                               cancellation_manager);
auto outputs = parallel_device.Join(
      /*expected_output_shapes=*/{PartialTensorShape({})}, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
const std::vector<std::unique_ptr<ParallelTensor>>& handles = *outputs;
const std::vector<int64_t>* shape;
Status s = handles[0]->Shape(&shape);
ASSERT_TRUE(s.ok());
EXPECT_EQ(0, shape->size());
}
TEST(PARALLEL_DEVICE_LIB, TestCancelOnError) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true, /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
ParallelDevice parallel_device(devices);
const FunctionDef assert_and_collective = FunctionDefHelper::Define(
"AssertAndCollective",
{"x: float", "condition: bool"},
{"y: float"},
{},
{
{{"assert"},
"Assert",
{"condition", "x"},
{{"T", std::vector<DataType>{DT_FLOAT}}}},
{{"y"},
"CollectiveReduce",
{"x"},
{{"T", DT_FLOAT},
{"group_size", static_cast<int>(devices.size())},
{"group_key", 0},
{"instance_key", 0},
{"merge_op", "Add"},
{"final_op", "Id"},
{"subdiv_offsets", std::vector<int>()}},
{"assert"}},
});
TF_ASSERT_OK(ContextFromInterface(unwrap(context.get()))
->AddFunctionDef(assert_and_collective));
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> call_op(
TFE_NewOp(context.get(), "AssertAndCollective", status.get()),
TFE_DeleteOp);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::unique_ptr<ParallelTensor> reduced_values =
parallel_device.ScalarsFromSequence<float>({1.0, 2.0}, context.get(),
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::unique_ptr<ParallelTensor> run_collective =
parallel_device.ScalarsFromSequence<bool>({true, true}, context.get(),
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
auto outputs = parallel_device.Execute(
context.get(), {reduced_values.get(), run_collective.get()},
"AssertAndCollective", TFE_OpGetAttrs(call_op.get()),
      /*expected_max_outputs=*/1, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ASSERT_EQ(outputs->size(), 1);
ParallelTensor* parallel_result = (*outputs)[0].get();
ExpectScalarEq<float>(parallel_result->tensor(0), 3.);
ExpectScalarEq<float>(parallel_result->tensor(1), 3.);
run_collective = parallel_device.ScalarsFromSequence<bool>(
{true, false}, context.get(), status.get());
parallel_device.Execute(context.get(),
{reduced_values.get(), run_collective.get()},
"AssertAndCollective", TFE_OpGetAttrs(call_op.get()),
                          /*expected_max_outputs=*/1, status.get());
EXPECT_NE(TF_GetCode(status.get()), TF_CANCELLED);
EXPECT_EQ(TF_GetCode(status.get()), TF_INVALID_ARGUMENT);
EXPECT_THAT(TF_Message(status.get()), HasSubstr("assertion failed"));
}
TEST(PARALLEL_DEVICE_LIB, TestDifferentShapes) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true, /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
ParallelDevice parallel_device(std::move(devices));
TensorHandlePtr two_vector = VectorFloatTensorHandle({3., 4.}, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TensorHandlePtr three_vector =
VectorFloatTensorHandle({5., 6., 7.}, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::vector<TensorHandlePtr> vector_handles;
vector_handles.reserve(2);
vector_handles.push_back(std::move(two_vector));
vector_handles.push_back(std::move(three_vector));
std::unique_ptr<ParallelTensor> unknown_length_vector =
ParallelTensor::FromTensorHandles(
parallel_device, std::move(vector_handles), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
const std::vector<int64_t>* shape;
TF_ASSERT_OK(unknown_length_vector->Shape(&shape));
EXPECT_THAT(*shape, ElementsAre(-1));
TensorHandlePtr scalar = FloatTensorHandle(2., status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
two_vector = VectorFloatTensorHandle({3., 4.}, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::vector<TensorHandlePtr> mixed_handles;
mixed_handles.reserve(2);
mixed_handles.push_back(std::move(scalar));
mixed_handles.push_back(std::move(two_vector));
std::unique_ptr<ParallelTensor> unknown_dims_vector =
ParallelTensor::FromTensorHandles(parallel_device,
std::move(mixed_handles), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
TF_ASSERT_OK(unknown_length_vector->Shape(&shape));
EXPECT_THAT(*shape, ElementsAre(-1));
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> size_op(
TFE_NewOp(context.get(), "Size", status.get()), TFE_DeleteOp);
auto result = parallel_device.Execute(
context.get(), {unknown_dims_vector.get()}, "Size",
      TFE_OpGetAttrs(size_op.get()), /*expected_max_outputs=*/1, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
TF_ASSERT_OK((*result)[0]->Shape(&shape));
EXPECT_EQ(0, shape->size());
}
TEST(PARALLEL_DEVICE_LIB, TestScalarsFromSequence) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true, /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
ParallelDevice parallel_device(std::move(devices));
{
std::unique_ptr<ParallelTensor> float_tensors =
parallel_device.ScalarsFromSequence<float>({10.0, 11.0}, context.get(),
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<float>(float_tensors->tensor(0), 10.0);
ExpectScalarEq<float>(float_tensors->tensor(1), 11.0);
}
{
std::unique_ptr<ParallelTensor> int_tensors =
parallel_device.ScalarsFromSequence<int>({5, 6}, context.get(),
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<int>(int_tensors->tensor(0), 5);
ExpectScalarEq<int>(int_tensors->tensor(1), 6);
}
}
}
} |
1,274 | cpp | tensorflow/tensorflow | tensor_shape_utils | tensorflow/c/kernels/tensor_shape_utils.cc | tensorflow/c/kernels/tensor_shape_utils_test.cc | #ifndef TENSORFLOW_C_KERNELS_TENSOR_SHAPE_UTILS_H_
#define TENSORFLOW_C_KERNELS_TENSOR_SHAPE_UTILS_H_
#include <string>
#include "tensorflow/c/tf_tensor.h"
namespace tensorflow {
std::string ShapeDebugString(TF_Tensor* tensor);
}
#endif
#include "tensorflow/c/kernels/tensor_shape_utils.h"
#include <string>
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
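// Formats like TensorShape::DebugString(): a tensor of shape {5, 4, 7} prints
// as "[5,4,7]" and a scalar prints as "[]" (see the unit tests below).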
std::string ShapeDebugString(TF_Tensor* tensor) {
CHECK_GE(TF_NumDims(tensor), 0);
tensorflow::string s = "[";
for (int i = 0; i < TF_NumDims(tensor); ++i) {
if (i > 0) tensorflow::strings::StrAppend(&s, ",");
int64_t dim = TF_Dim(tensor, i);
CHECK_GE(dim, 0);
tensorflow::strings::StrAppend(&s, dim);
}
tensorflow::strings::StrAppend(&s, "]");
return s;
}
} | #include "tensorflow/c/kernels/tensor_shape_utils.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
struct TF_TensorWrapper {
TF_Tensor* tf_tensor;
explicit TF_TensorWrapper(TF_Tensor* tensor) { tf_tensor = tensor; }
~TF_TensorWrapper() { TF_DeleteTensor(tf_tensor); }
};
void TestShapeMatch(TensorShape shape) {
Tensor tensor(DT_FLOAT, shape);
Status status;
TF_Tensor* tf_tensor = TF_TensorFromTensor(tensor, &status);
TF_TensorWrapper tensor_wrapper = TF_TensorWrapper(tf_tensor);
ASSERT_TRUE(status.ok()) << status.ToString();
ASSERT_EQ(tensor.shape().DebugString(), ShapeDebugString(tf_tensor));
}
TEST(ShapeDebugString, RegularShape) { TestShapeMatch(TensorShape({5, 4, 7})); }
TEST(ShapeDebugString, ScalarShape) { TestShapeMatch(TensorShape({})); }
}
} |
1,275 | cpp | tensorflow/tensorflow | source_writer | tensorflow/java/src/gen/cc/source_writer.cc | tensorflow/java/src/gen/cc/source_writer_test.cc | #ifndef TENSORFLOW_JAVA_SRC_GEN_CC_SOURCE_WRITER_H_
#define TENSORFLOW_JAVA_SRC_GEN_CC_SOURCE_WRITER_H_
#include <string>
#include <stack>
#include <list>
#include <set>
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/java/src/gen/cc/java_defs.h"
namespace tensorflow {
namespace java {
class SourceWriter {
public:
SourceWriter();
virtual ~SourceWriter();
SourceWriter& Indent(int tab);
SourceWriter& Prefix(const char* line_prefix);
SourceWriter& Write(const StringPiece& str);
SourceWriter& WriteFromFile(const string& fname, Env* env = Env::Default());
SourceWriter& Append(const StringPiece& str);
SourceWriter& AppendType(const Type& type);
SourceWriter& EndLine();
SourceWriter& BeginBlock(const string& expression = "");
SourceWriter& EndBlock();
SourceWriter& BeginMethod(const Method& method, int modifiers,
const Javadoc* javadoc = nullptr);
SourceWriter& EndMethod();
SourceWriter& BeginType(const Type& type, int modifiers,
const std::list<Type>* extra_dependencies = nullptr,
const Javadoc* javadoc = nullptr);
SourceWriter& BeginInnerType(const Type& type, int modifiers,
const Javadoc* javadoc = nullptr);
SourceWriter& EndType();
SourceWriter& WriteField(const Variable& field, int modifiers,
const Javadoc* javadoc = nullptr);
protected:
virtual void DoAppend(const StringPiece& str) = 0;
private:
class TypeVisitor {
public:
virtual ~TypeVisitor() = default;
void Visit(const Type& type);
protected:
virtual void DoVisit(const Type& type) = 0;
};
class GenericNamespace : public TypeVisitor {
public:
GenericNamespace() = default;
explicit GenericNamespace(const GenericNamespace* parent)
: generic_names_(parent->generic_names_) {}
std::list<const Type*> declared_types() {
return declared_types_;
}
protected:
virtual void DoVisit(const Type& type);
private:
std::list<const Type*> declared_types_;
std::set<string> generic_names_;
};
class TypeImporter : public TypeVisitor {
public:
explicit TypeImporter(const string& current_package)
: current_package_(current_package) {}
virtual ~TypeImporter() = default;
const std::set<string> imports() {
return imports_;
}
protected:
virtual void DoVisit(const Type& type);
private:
string current_package_;
std::set<string> imports_;
};
string left_margin_;
string line_prefix_;
bool newline_ = true;
std::stack<GenericNamespace*> generic_namespaces_;
SourceWriter& WriteModifiers(int modifiers);
SourceWriter& WriteJavadoc(const Javadoc& javadoc);
SourceWriter& WriteAnnotations(const std::list<Annotation>& annotations);
SourceWriter& WriteGenerics(const std::list<const Type*>& generics);
GenericNamespace* PushGenericNamespace(int modifiers);
void PopGenericNamespace();
};
class SourceFileWriter : public SourceWriter {
public:
explicit SourceFileWriter(WritableFile* file) : file_(file) {}
virtual ~SourceFileWriter() = default;
protected:
void DoAppend(const StringPiece& str) override {
TF_CHECK_OK(file_->Append(str));
}
private:
WritableFile* file_;
};
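// Usage sketch: with a SourceBufferWriter w,
//   w.Indent(2).Prefix("//").Append("hello").EndLine();
// leaves "  //hello\n" in w.str(); the left margin and line prefix are emitted
// lazily at the start of each new line.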
class SourceBufferWriter : public SourceWriter {
public:
SourceBufferWriter() : owns_buffer_(true), buffer_(new string()) {}
explicit SourceBufferWriter(string* buffer)
: owns_buffer_(false), buffer_(buffer) {}
virtual ~SourceBufferWriter() {
if (owns_buffer_) delete buffer_;
}
const string& str() { return *buffer_; }
protected:
void DoAppend(const StringPiece& str) override {
buffer_->append(str.begin(), str.end());
}
private:
bool owns_buffer_;
string* buffer_;
};
}
}
#endif
#include <string>
#include <algorithm>
#include <list>
#include "tensorflow/java/src/gen/cc/source_writer.h"
namespace tensorflow {
namespace java {
SourceWriter::SourceWriter() {
generic_namespaces_.push(new GenericNamespace());
}
SourceWriter::~SourceWriter() {
while (!generic_namespaces_.empty()) {
GenericNamespace* generic_namespace = generic_namespaces_.top();
generic_namespaces_.pop();
delete generic_namespace;
}
}
SourceWriter& SourceWriter::Indent(int tab) {
left_margin_.resize(
std::max(static_cast<int>(left_margin_.size() + tab), 0), ' ');
return *this;
}
SourceWriter& SourceWriter::Prefix(const char* line_prefix) {
line_prefix_ = line_prefix;
return *this;
}
SourceWriter& SourceWriter::Write(const StringPiece& str) {
size_t line_pos = 0;
do {
size_t start_pos = line_pos;
line_pos = str.find('\n', start_pos);
if (line_pos != string::npos) {
++line_pos;
Append(str.substr(start_pos, line_pos - start_pos));
newline_ = true;
} else {
Append(str.substr(start_pos, str.size() - start_pos));
}
} while (line_pos != string::npos && line_pos < str.size());
return *this;
}
SourceWriter& SourceWriter::WriteFromFile(const string& fname, Env* env) {
  string data;
  TF_CHECK_OK(ReadFileToString(env, fname, &data));
  return Write(data);
}
SourceWriter& SourceWriter::Append(const StringPiece& str) {
if (!str.empty()) {
if (newline_) {
DoAppend(left_margin_ + line_prefix_);
newline_ = false;
}
DoAppend(str);
}
return *this;
}
SourceWriter& SourceWriter::AppendType(const Type& type) {
if (type.wildcard()) {
Append("?");
} else {
Append(type.name());
if (!type.parameters().empty()) {
Append("<");
bool first = true;
for (const Type& t : type.parameters()) {
if (!first) {
Append(", ");
}
AppendType(t);
first = false;
}
Append(">");
}
}
return *this;
}
SourceWriter& SourceWriter::EndLine() {
Append("\n");
newline_ = true;
return *this;
}
SourceWriter& SourceWriter::BeginBlock(const string& expression) {
if (!expression.empty()) {
Append(expression + " {");
} else {
Append(newline_ ? "{" : " {");
}
return EndLine().Indent(2);
}
SourceWriter& SourceWriter::EndBlock() {
return Indent(-2).Append("}").EndLine();
}
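// Opens a method: declares any generic parameters used by the signature, then
// writes javadoc, annotations, modifiers, return type, name, and the argument
// list before opening the body block.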
SourceWriter& SourceWriter::BeginMethod(const Method& method, int modifiers,
const Javadoc* javadoc) {
GenericNamespace* generic_namespace = PushGenericNamespace(modifiers);
if (!method.constructor()) {
generic_namespace->Visit(method.return_type());
}
for (const Variable& v : method.arguments()) {
generic_namespace->Visit(v.type());
}
EndLine();
if (javadoc != nullptr) {
WriteJavadoc(*javadoc);
}
if (!method.annotations().empty()) {
WriteAnnotations(method.annotations());
}
WriteModifiers(modifiers);
if (!generic_namespace->declared_types().empty()) {
WriteGenerics(generic_namespace->declared_types());
Append(" ");
}
if (!method.constructor()) {
AppendType(method.return_type()).Append(" ");
}
Append(method.name()).Append("(");
bool first = true;
for (const Variable& v : method.arguments()) {
if (!first) {
Append(", ");
}
AppendType(v.type()).Append(v.variadic() ? "... " : " ").Append(v.name());
first = false;
}
return Append(")").BeginBlock();
}
SourceWriter& SourceWriter::EndMethod() {
EndBlock();
PopGenericNamespace();
return *this;
}
SourceWriter& SourceWriter::BeginType(const Type& type, int modifiers,
const std::list<Type>* extra_dependencies,
const Javadoc* javadoc) {
if (!type.package().empty()) {
Append("package ").Append(type.package()).Append(";").EndLine();
}
TypeImporter type_importer(type.package());
type_importer.Visit(type);
if (extra_dependencies != nullptr) {
for (const Type& t : *extra_dependencies) {
type_importer.Visit(t);
}
}
if (!type_importer.imports().empty()) {
EndLine();
for (const string& s : type_importer.imports()) {
Append("import ").Append(s).Append(";").EndLine();
}
}
return BeginInnerType(type, modifiers, javadoc);
}
SourceWriter& SourceWriter::BeginInnerType(const Type& type, int modifiers,
const Javadoc* javadoc) {
GenericNamespace* generic_namespace = PushGenericNamespace(modifiers);
generic_namespace->Visit(type);
EndLine();
if (javadoc != nullptr) {
WriteJavadoc(*javadoc);
}
if (!type.annotations().empty()) {
WriteAnnotations(type.annotations());
}
WriteModifiers(modifiers);
CHECK_EQ(Type::Kind::CLASS, type.kind()) << ": Not supported yet";
Append("class ").Append(type.name());
if (!generic_namespace->declared_types().empty()) {
WriteGenerics(generic_namespace->declared_types());
}
if (!type.supertypes().empty()) {
bool first_interface = true;
for (const Type& t : type.supertypes()) {
if (t.kind() == Type::CLASS) {
Append(" extends ");
} else if (first_interface) {
Append(" implements ");
first_interface = false;
} else {
Append(", ");
}
AppendType(t);
}
}
return BeginBlock();
}
SourceWriter& SourceWriter::EndType() {
EndBlock();
PopGenericNamespace();
return *this;
}
SourceWriter& SourceWriter::WriteField(const Variable& field, int modifiers,
const Javadoc* javadoc) {
  if (javadoc != nullptr && !javadoc->brief().empty()) {
    // Emit the field's brief description as a single-line javadoc.
    Append("/** " + javadoc->brief() + " */").EndLine();
  }
WriteModifiers(modifiers);
AppendType(field.type()).Append(" ").Append(field.name()).Append(";");
EndLine();
return *this;
}
SourceWriter& SourceWriter::WriteModifiers(int modifiers) {
if (modifiers & PUBLIC) {
Append("public ");
} else if (modifiers & PROTECTED) {
Append("protected ");
} else if (modifiers & PRIVATE) {
Append("private ");
}
if (modifiers & STATIC) {
Append("static ");
}
if (modifiers & FINAL) {
Append("final ");
}
return *this;
}
SourceWriter& SourceWriter::WriteJavadoc(const Javadoc& javadoc) {
  // Minimal emission: write the brief between "/**" and " */" markers.
  Append("/**").Prefix(" * ").EndLine();
  if (!javadoc.brief().empty()) Write(javadoc.brief()).EndLine();
  return Prefix("").Append(" */").EndLine();
}
SourceWriter& SourceWriter::WriteAnnotations(
const std::list<Annotation>& annotations) {
for (const Annotation& a : annotations) {
Append("@" + a.name());
if (!a.attributes().empty()) {
Append("(").Append(a.attributes()).Append(")");
}
EndLine();
}
return *this;
}
SourceWriter& SourceWriter::WriteGenerics(
const std::list<const Type*>& generics) {
Append("<");
bool first = true;
for (const Type* pt : generics) {
if (!first) {
Append(", ");
}
Append(pt->name());
if (!pt->supertypes().empty()) {
Append(" extends ").AppendType(pt->supertypes().front());
}
first = false;
}
return Append(">");
}
SourceWriter::GenericNamespace* SourceWriter::PushGenericNamespace(
int modifiers) {
GenericNamespace* generic_namespace;
if (modifiers & STATIC) {
generic_namespace = new GenericNamespace();
} else {
generic_namespace = new GenericNamespace(generic_namespaces_.top());
}
generic_namespaces_.push(generic_namespace);
return generic_namespace;
}
void SourceWriter::PopGenericNamespace() {
GenericNamespace* generic_namespace = generic_namespaces_.top();
generic_namespaces_.pop();
delete generic_namespace;
}
void SourceWriter::TypeVisitor::Visit(const Type& type) {
DoVisit(type);
for (const Type& t : type.parameters()) {
Visit(t);
}
for (const Annotation& t : type.annotations()) {
DoVisit(t);
}
for (const Type& t : type.supertypes()) {
Visit(t);
}
}
void SourceWriter::GenericNamespace::DoVisit(const Type& type) {
if (type.kind() == Type::GENERIC && !type.wildcard() &&
generic_names_.find(type.name()) == generic_names_.end()) {
declared_types_.push_back(&type);
generic_names_.insert(type.name());
}
}
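// Illustrative note: visiting List<T> twice declares "T" only once, and
// wildcard parameters ("?") are never declared, so WriteGenerics sees each
// type variable exactly one time.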
void SourceWriter::TypeImporter::DoVisit(const Type& type) {
if (!type.package().empty() && type.package() != current_package_) {
imports_.insert(type.canonical_name());
}
}
}
} | #include <list>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/java/src/gen/cc/java_defs.h"
#include "tensorflow/java/src/gen/cc/source_writer.h"
namespace tensorflow {
namespace java {
namespace {
TEST(AppendTest, SingleLineText) {
SourceBufferWriter writer;
writer.Append("You say goodbye and I say hello!");
const char* expected = "You say goodbye and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineText) {
SourceBufferWriter writer;
writer.Append("You say goodbye\nand I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineTextWithIndent) {
SourceBufferWriter writer;
writer.Indent(2).Append("You say goodbye\nand I say hello!");
const char* expected = " You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineTextWithPrefix) {
SourceBufferWriter writer;
writer.Prefix("--").Append("You say goodbye\nand I say hello!");
const char* expected = "--You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineTextWithIndentAndPrefix) {
SourceBufferWriter writer;
writer.Indent(2).Prefix("--").Append("You say goodbye\nand I say hello!");
const char* expected = " --You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, SingleLineText) {
SourceBufferWriter writer;
writer.Write("You say goodbye and I say hello!");
const char* expected = "You say goodbye and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineText) {
SourceBufferWriter writer;
writer.Write("You say goodbye\nand I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineTextWithIndent) {
SourceBufferWriter writer;
writer.Indent(2).Write("You say goodbye\nand I say hello!");
const char* expected = " You say goodbye\n and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineTextWithPrefix) {
SourceBufferWriter writer;
writer.Prefix("--").Write("You say goodbye\nand I say hello!");
const char* expected = "--You say goodbye\n--and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineTextWithIndentAndPrefix) {
SourceBufferWriter writer;
writer.Indent(2).Prefix("--").Write("You say goodbye\nand I say hello!");
const char* expected = " --You say goodbye\n --and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, Basic) {
SourceBufferWriter writer;
writer.Append("You say goodbye").EndLine().Append("and I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, Indent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Append("and I say hello!");
const char* expected = "You say goodbye\n and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, IndentAndOutdent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Append("and I say hello!")
.EndLine()
.Indent(-2)
.Append("Hello, hello!");
const char* expected = "You say goodbye\n and I say hello!\nHello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, Prefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Prefix("--")
.Append("and I say hello!");
const char* expected = "You say goodbye\n--and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, PrefixAndRemovePrefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Prefix("--")
.Append("and I say hello!")
.EndLine()
.Prefix("")
.Append("Hello, hello!");
const char* expected = "You say goodbye\n--and I say hello!\nHello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, IndentAndPrefixAndOutdentAndRemovePrefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Prefix("--")
.Append("and I say hello!")
.EndLine()
.Indent(-2)
.Prefix("")
.Append("Hello, hello!");
const char* expected = "You say goodbye\n --and I say hello!\nHello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, NegativeIndent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(-10)
.Append("and I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, CumulativeIndent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Append("and I say hello!")
.EndLine()
.Indent(2)
.Append("Hello, hello!");
const char* expected =
"You say goodbye\n and I say hello!\n Hello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, EmptyPrefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Prefix("")
.Append("and I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(StreamTest, BlocksAndLines) {
SourceBufferWriter writer;
writer.Append("int i = 0;").EndLine()
.Append("int j = 10;").EndLine()
.Append("if (true)")
.BeginBlock()
.Append("int aLongWayToTen = 0;").EndLine()
.Append("while (++i <= j)")
.BeginBlock()
.Append("++aLongWayToTen;").EndLine()
.EndBlock()
.EndBlock();
const char* expected =
"int i = 0;\n"
"int j = 10;\n"
"if (true) {\n"
" int aLongWayToTen = 0;\n"
" while (++i <= j) {\n"
" ++aLongWayToTen;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(StreamTest, Types) {
SourceBufferWriter writer;
Type generic = Type::Generic("T").add_supertype(Type::Class("Number"));
writer.AppendType(Type::Int())
.Append(", ")
.AppendType(Type::Class("String"))
.Append(", ")
.AppendType(generic)
.Append(", ")
.AppendType(Type::ListOf(generic))
.Append(", ")
.AppendType(Type::ListOf(Type::IterableOf(generic)))
.Append(", ")
.AppendType(Type::ListOf(Type::Wildcard()));
const char* expected =
"int, String, T, List<T>, List<Iterable<T>>, List<?>";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(StreamTest, FileSnippet) {
SourceBufferWriter writer;
const string fname = tensorflow::io::JoinPath(
tensorflow::testing::TensorFlowSrcRoot(),
"java/src/gen/resources/test.java.snippet");
writer.WriteFromFile(fname)
.BeginBlock()
.WriteFromFile(fname)
.EndBlock();
const char* expected =
"
"System.out.println(\"Hello!\");\n"
"{\n"
"
" System.out.println(\"Hello!\");\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, SimpleClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
writer.BeginType(clazz, PUBLIC).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, SimpleClassWithDependencies) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
std::list<Type> deps;
deps.push_back(Type::Class("TypeA", "org.test.sub"));
deps.push_back(Type::Class("TypeA", "org.test.sub"));
deps.push_back(Type::Class("TypeB", "org.other"));
deps.push_back(Type::Class("SamePackageType", "org.tensorflow"));
deps.push_back(Type::Class("NoPackageType"));
writer.BeginType(clazz, PUBLIC, &deps).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"import org.other.TypeB;\n"
"import org.test.sub.TypeA;\n\n"
"public class Test {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, AnnotatedAndDocumentedClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Javadoc clazz_doc = Javadoc::Create("Javadoc test")
.details("This is a\nmultiline description.");
clazz.add_annotation(Annotation::Create("Bean"));
clazz.add_annotation(Annotation::Create("SuppressWarnings")
.attributes("\"rawtypes\""));
writer.BeginType(clazz, PUBLIC, nullptr, &clazz_doc).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"\n"
"@Bean\n"
"@SuppressWarnings(\"rawtypes\")\n"
"public class Test {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, ParameterizedClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
clazz.add_parameter(Type::Generic("T"));
clazz.add_parameter(Type::Generic("U").add_supertype(Type::Class("Number")));
writer.BeginType(clazz, PUBLIC).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T, U extends Number> {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, ParameterizedClassAndSupertypes) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T");
clazz.add_parameter(type_t);
Type type_u = Type::Generic("U").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_u);
clazz.add_supertype(Type::Interface("Parameterizable").add_parameter(type_u));
clazz.add_supertype(Type::Interface("Runnable"));
clazz.add_supertype(Type::Class("SuperTest").add_parameter(type_t));
writer.BeginType(clazz, PUBLIC).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T, U extends Number>"
" extends SuperTest<T> implements Parameterizable<U>, Runnable {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, ParameterizedClassFields) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Variable field1 = Variable::Create("field1", Type::Class("String"));
Variable field2 = Variable::Create("field2", Type::Class("String"));
Variable field3 = Variable::Create("field3", type_t);
Javadoc field3_doc = Javadoc::Create("This variable is documented");
writer.BeginType(clazz, PUBLIC)
.WriteField(field1, STATIC | PUBLIC | FINAL)
.WriteField(field2, PRIVATE)
.WriteField(field3, PRIVATE, &field3_doc)
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" public static final String field1;\n"
" private String field2;\n"
" \n"
" private T field3;\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, SimpleInnerClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type inner_class = Type::Class("InnerTest");
writer.BeginType(clazz, PUBLIC)
.BeginInnerType(inner_class, PUBLIC)
.EndType()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" public class InnerTest {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, StaticParameterizedInnerClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Type inner_class = Type::Class("InnerTest");
inner_class.add_parameter(type_t);
writer.BeginType(clazz, PUBLIC)
.BeginInnerType(inner_class, PUBLIC | STATIC)
.EndType()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" \n"
" public static class InnerTest<T extends Number> {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, SimpleMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Method method = Method::Create("doNothing", Type::Void());
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC)
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" public void doNothing() {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, AnnotatedAndDocumentedMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Method method = Method::Create("doNothing", Type::Void());
Javadoc method_doc =
Javadoc::Create("Javadoc test")
.details("This method has a\nmultiline description.");
method.add_annotation(Annotation::Create("Override"));
method.add_annotation(Annotation::Create("SuppressWarnings")
.attributes("\"rawtypes\""));
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC, &method_doc)
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" \n"
" @Override\n"
" @SuppressWarnings(\"rawtypes\")\n"
" public void doNothing() {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, DocumentedMethodWithArguments) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Variable reverse = Variable::Create("reverse", Type::Boolean());
Method method = Method::Create("boolToInt", Type::Int());
method.add_argument(Variable::Create("b", Type::Boolean()));
method.add_argument(reverse);
Javadoc method_doc =
Javadoc::Create("Converts a boolean to an int")
.details("This method will convert\na boolean to an int")
.add_param_tag(reverse.name(), "if true, value is reversed")
.add_tag("return", "int value for this boolean");
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC, &method_doc)
.Append("if (b && !reverse)")
.BeginBlock()
.Append("return 1;")
.EndLine()
.EndBlock()
.Append("return 0;")
.EndLine()
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" \n"
" public int boolToInt(boolean b, boolean reverse) {\n"
" if (b && !reverse) {\n"
" return 1;\n"
" }\n"
" return 0;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, ParameterizedMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Method method = Method::Create("doNothing", type_t);
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC)
.Append("return null;")
.EndLine()
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" \n"
" public T doNothing() {\n"
" return null;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, StaticParameterizedMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Method method = Method::Create("doNothing", type_t);
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC | STATIC)
.Append("return null;")
.EndLine()
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" \n"
" public static <T extends Number> T doNothing() {\n"
" return null;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
}
}
} |
1,276 | cpp | tensorflow/tensorflow | gen_proto_text_functions_lib | tensorflow/tools/proto_text/gen_proto_text_functions_lib.cc | tensorflow/tools/proto_text/gen_proto_text_functions_lib_test.cc | #ifndef TENSORFLOW_TOOLS_PROTO_TEXT_GEN_PROTO_TEXT_FUNCTIONS_LIB_H_
#define TENSORFLOW_TOOLS_PROTO_TEXT_GEN_PROTO_TEXT_FUNCTIONS_LIB_H_
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
struct ProtoTextFunctionCode {
string header;
string header_impl;
string cc;
};
ProtoTextFunctionCode GetProtoTextFunctionCode(
const tensorflow::protobuf::FileDescriptor& fd,
const string& tf_header_prefix);
}
#endif
#include "tensorflow/tools/proto_text/gen_proto_text_functions_lib.h"
#include <algorithm>
#include <set>
#include <unordered_set>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
using ::tensorflow::protobuf::Descriptor;
using ::tensorflow::protobuf::EnumDescriptor;
using ::tensorflow::protobuf::FieldDescriptor;
using ::tensorflow::protobuf::FieldOptions;
using ::tensorflow::protobuf::FileDescriptor;
namespace tensorflow {
namespace {
template <typename... Args>
string StrCat(const Args&... args) {
std::ostringstream s;
std::vector<int> give_me_a_name{((s << args), 0)...};
return s.str();
}
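// The braced-init-list above is a pre-C++17 idiom for expanding a parameter
// pack with side effects: each element evaluates "(s << args), 0", so the
// stream insertions run left-to-right and the vector of zeros (whose name
// hints it exists only to force the expansion) is discarded. With C++17 the
// equivalent fold expression would be, as a sketch:
//   template <typename... Args>
//   string StrCat17(const Args&... args) {
//     std::ostringstream s;
//     (s << ... << args);  // left fold over operator<<
//     return s.str();
//   }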
template <typename... Args>
string StrAppend(string* to_append, const Args&... args) {
*to_append += StrCat(args...);
return *to_append;
}
class Generator {
public:
explicit Generator(const string& tf_header_prefix)
: tf_header_prefix_(tf_header_prefix),
header_(&code_.header),
header_impl_(&code_.header_impl),
cc_(&code_.cc) {}
void Generate(const FileDescriptor& fd);
ProtoTextFunctionCode code() const { return code_; }
private:
struct Section {
explicit Section(string* str) : str(str) {}
string* str;
string indent;
};
Generator& SetOutput(Section* section) {
cur_ = section;
return *this;
}
Generator& Nest() {
StrAppend(&cur_->indent, " ");
return *this;
}
Generator& Unnest() {
cur_->indent = cur_->indent.substr(0, cur_->indent.size() - 2);
return *this;
}
template <typename... Args>
Generator& Print(Args... args) {
StrAppend(cur_->str, cur_->indent, args..., "\n");
return *this;
}
void AppendFieldValueAppend(const FieldDescriptor& field,
const bool omit_default,
const string& field_expr);
void AppendFieldAppend(const FieldDescriptor& field);
void AppendDebugStringFunctions(const Descriptor& md);
void AppendEnumFunctions(const EnumDescriptor& enum_d);
void AppendParseMessageFunction(const Descriptor& md);
void AppendMessageFunctions(const Descriptor& md);
void AddNamespaceToCurrentSection(const string& package, bool open);
void AddHeadersToCurrentSection(const std::vector<string>& headers);
const string tf_header_prefix_;
ProtoTextFunctionCode code_;
Section* cur_ = nullptr;
Section header_;
Section header_impl_;
Section cc_;
std::unordered_set<string> map_append_signatures_included_;
Generator(const Generator&) = delete;
void operator=(const Generator&) = delete;
};
string GetPackageReferencePrefix(const FileDescriptor* fd) {
string result = "::";
const string& package = fd->package();
for (size_t i = 0; i < package.size(); ++i) {
if (package[i] == '.') {
result += "::";
} else {
result += package[i];
}
}
result += "::";
return result;
}
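// Illustrative example: the proto package "tensorflow.test" maps to the C++
// qualifier "::tensorflow::test::", ready to be prepended to a class name.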
string GetClassName(const Descriptor& d) {
if (d.containing_type() == nullptr) return d.name();
return StrCat(GetClassName(*d.containing_type()), "_", d.name());
}
string GetClassName(const EnumDescriptor& ed) {
if (ed.containing_type() == nullptr) return ed.name();
return StrCat(GetClassName(*ed.containing_type()), "_", ed.name());
}
string GetQualifiedName(const Descriptor& d) {
return StrCat(GetPackageReferencePrefix(d.file()), GetClassName(d));
}
string GetQualifiedName(const EnumDescriptor& d) {
return StrCat(GetPackageReferencePrefix(d.file()), GetClassName(d));
}
string GetQualifiedAppendFn(const Descriptor& d) {
return StrCat(GetPackageReferencePrefix(d.file()),
"internal::AppendProtoDebugString");
}
string GetEnumNameFn(const EnumDescriptor& enum_d) {
return StrCat("EnumName_", GetClassName(enum_d));
}
string GetQualifiedEnumNameFn(const EnumDescriptor& enum_d) {
return StrCat(GetPackageReferencePrefix(enum_d.file()),
GetEnumNameFn(enum_d));
}
string GetProtoTextHeaderName(const FileDescriptor& fd, bool impl) {
const int dot_index = fd.name().find_last_of('.');
return fd.name().substr(0, dot_index) +
(impl ? ".pb_text-impl.h" : ".pb_text.h");
}
string GetProtoHeaderName(const FileDescriptor& fd) {
const int dot_index = fd.name().find_last_of('.');
return fd.name().substr(0, dot_index) + ".pb.h";
}
string GetCppClass(const FieldDescriptor& d) {
string cpp_class = d.cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE
? GetQualifiedName(*d.message_type())
: d.cpp_type_name();
if (cpp_class == "int64") {
cpp_class = kProtobufInt64Typename;
}
return cpp_class;
}
string GetHeaderGuard(const FileDescriptor& fd, bool impl) {
string s = fd.name();
std::replace(s.begin(), s.end(), '/', '_');
std::replace(s.begin(), s.end(), '.', '_');
return s + (impl ? "_IMPL_H_" : "_H_");
}
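// Illustrative example: for the file "tensorflow/tools/proto_text/test.proto"
// this returns "tensorflow_tools_proto_text_test_proto_H_" (or the
// "..._IMPL_H_" variant for the -impl header).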
void Generator::AppendFieldValueAppend(const FieldDescriptor& field,
const bool omit_default,
const string& field_expr) {
CHECK(!field.has_presence() || field.containing_oneof() != nullptr ||
field.cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE)
<< field.file()->name();
SetOutput(&cc_);
switch (field.cpp_type()) {
case FieldDescriptor::CPPTYPE_INT32:
case FieldDescriptor::CPPTYPE_INT64:
case FieldDescriptor::CPPTYPE_UINT32:
case FieldDescriptor::CPPTYPE_UINT64:
case FieldDescriptor::CPPTYPE_DOUBLE:
case FieldDescriptor::CPPTYPE_FLOAT:
Print("o->", omit_default ? "AppendNumericIfNotZero" : "AppendNumeric",
"(\"", field.name(), "\", ", field_expr, ");");
break;
case FieldDescriptor::CPPTYPE_BOOL:
Print("o->", omit_default ? "AppendBoolIfTrue" : "AppendBool", "(\"",
field.name(), "\", ", field_expr, ");");
break;
case FieldDescriptor::CPPTYPE_STRING: {
const auto ctype = field.options().ctype();
CHECK(ctype == FieldOptions::CORD || ctype == FieldOptions::STRING)
<< "Unsupported ctype " << ctype;
Print("o->", omit_default ? "AppendStringIfNotEmpty" : "AppendString",
"(\"", field.name(), "\", ProtobufStringToString(", field_expr,
"));");
break;
}
case FieldDescriptor::CPPTYPE_ENUM:
if (omit_default) {
Print("if (", field_expr, " != 0) {").Nest();
}
Print("const char* enum_name = ",
GetQualifiedEnumNameFn(*field.enum_type()), "(", field_expr, ");");
Print("if (enum_name[0]) {").Nest();
Print("o->AppendEnumName(\"", field.name(), "\", enum_name);");
Unnest().Print("} else {").Nest();
Print("o->AppendNumeric(\"", field.name(), "\", ", field_expr, ");");
Unnest().Print("}");
if (omit_default) {
Unnest().Print("}");
}
break;
case FieldDescriptor::CPPTYPE_MESSAGE:
CHECK(!field.message_type()->options().map_entry());
if (omit_default) {
Print("if (msg.has_", field.name(), "()) {").Nest();
}
Print("o->OpenNestedMessage(\"", field.name(), "\");");
Print(GetQualifiedAppendFn(*field.message_type()), "(o, ", field_expr,
");");
Print("o->CloseNestedMessage();");
if (omit_default) {
Unnest().Print("}");
}
break;
}
}
void Generator::AppendFieldAppend(const FieldDescriptor& field) {
const string& name = field.name();
if (field.is_map()) {
Print("{").Nest();
const auto& key_type = *field.message_type()->FindFieldByName("key");
const auto& value_type = *field.message_type()->FindFieldByName("value");
Print("std::vector<", key_type.cpp_type_name(), "> keys;");
Print("for (const auto& e : msg.", name, "()) keys.push_back(e.first);");
Print("std::stable_sort(keys.begin(), keys.end());");
Print("for (const auto& key : keys) {").Nest();
Print("o->OpenNestedMessage(\"", name, "\");");
AppendFieldValueAppend(key_type, false , "key");
AppendFieldValueAppend(value_type, false ,
StrCat("msg.", name, "().at(key)"));
Print("o->CloseNestedMessage();");
Unnest().Print("}");
Unnest().Print("}");
} else if (field.is_repeated()) {
Print("for (int i = 0; i < msg.", name, "_size(); ++i) {");
Nest();
AppendFieldValueAppend(field, false ,
"msg." + name + "(i)");
Unnest().Print("}");
} else {
const auto* oneof = field.containing_oneof();
if (oneof != nullptr) {
string camel_name = field.camelcase_name();
camel_name[0] = toupper(camel_name[0]);
Print("if (msg.", oneof->name(), "_case() == ",
GetQualifiedName(*oneof->containing_type()), "::k", camel_name,
") {");
Nest();
AppendFieldValueAppend(field, false ,
"msg." + name + "()");
Unnest();
Print("}");
} else {
AppendFieldValueAppend(field, true ,
"msg." + name + "()");
}
}
}
void Generator::AppendEnumFunctions(const EnumDescriptor& enum_d) {
const string sig = StrCat("const char* ", GetEnumNameFn(enum_d), "(\n ",
GetQualifiedName(enum_d), " value)");
SetOutput(&header_);
Print().Print("
Print(sig, ";");
SetOutput(&cc_);
Print().Print(sig, " {");
Nest().Print("switch (value) {").Nest();
for (int i = 0; i < enum_d.value_count(); ++i) {
const auto& value = *enum_d.value(i);
Print("case ", value.number(), ": return \"", value.name(), "\";");
}
Print("default: return \"\";");
Unnest().Print("}");
Unnest().Print("}");
}
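// Sketch of the generated output for a hypothetical enum pkg.Foo with values
// ZERO = 0 and ONE = 1 (names here are illustrative only):
//   const char* EnumName_Foo(
//       ::pkg::Foo value) {
//     switch (value) {
//       case 0: return "ZERO";
//       case 1: return "ONE";
//       default: return "";
//     }
//   }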
void Generator::AppendParseMessageFunction(const Descriptor& md) {
const bool map_append = (md.options().map_entry());
string sig;
if (!map_append) {
sig = StrCat("bool ProtoParseFromString(\n const string& s,\n ",
GetQualifiedName(md), "* msg)");
SetOutput(&header_).Print(sig, "\n TF_MUST_USE_RESULT;");
SetOutput(&cc_);
Print().Print(sig, " {").Nest();
Print("msg->Clear();");
Print("Scanner scanner(s);");
Print("if (!internal::ProtoParseFromScanner(",
"&scanner, false, false, msg)) return false;");
Print("scanner.Eos();");
Print("return scanner.GetResult();");
Unnest().Print("}");
}
sig = StrCat("bool ProtoParseFromScanner(",
"\n ::tensorflow::strings::Scanner* scanner, bool nested, "
"bool close_curly,\n ");
const FieldDescriptor* key_type = nullptr;
const FieldDescriptor* value_type = nullptr;
if (map_append) {
key_type = md.FindFieldByName("key");
value_type = md.FindFieldByName("value");
StrAppend(&sig, "::tensorflow::protobuf::Map<", GetCppClass(*key_type),
", ", GetCppClass(*value_type), ">* map)");
} else {
StrAppend(&sig, GetQualifiedName(md), "* msg)");
}
if (!map_append_signatures_included_.insert(sig).second) {
return;
}
if (!map_append) {
SetOutput(&header_impl_).Print(sig, ";");
}
SetOutput(&cc_);
Print().Print("namespace internal {");
if (map_append) {
Print("namespace {");
}
Print().Print(sig, " {").Nest();
if (map_append) {
Print(GetCppClass(*key_type), " map_key;");
Print("bool set_map_key = false;");
Print(GetCppClass(*value_type), " map_value;");
Print("bool set_map_value = false;");
}
Print("std::vector<bool> has_seen(", md.field_count(), ", false);");
Print("while(true) {").Nest();
Print("ProtoSpaceAndComments(scanner);");
Print("if (nested && (scanner->Peek() == (close_curly ? '}' : '>'))) {")
.Nest();
Print("scanner->One(Scanner::ALL);");
Print("ProtoSpaceAndComments(scanner);");
if (map_append) {
Print("if (!set_map_key || !set_map_value) return false;");
Print("(*map)[map_key] = map_value;");
}
Print("return true;");
Unnest().Print("}");
Print("if (!nested && scanner->empty()) { return true; }");
Print("scanner->RestartCapture()");
Print(" .Many(Scanner::LETTER_DIGIT_UNDERSCORE)");
Print(" .StopCapture();");
Print("StringPiece identifier;");
Print("if (!scanner->GetResult(nullptr, &identifier)) return false;");
Print("bool parsed_colon = false;");
Print("(void)parsed_colon;");
Print("ProtoSpaceAndComments(scanner);");
Print("if (scanner->Peek() == ':') {");
Nest().Print("parsed_colon = true;");
Print("scanner->One(Scanner::ALL);");
Print("ProtoSpaceAndComments(scanner);");
Unnest().Print("}");
for (int i = 0; i < md.field_count(); ++i) {
const FieldDescriptor* field = md.field(i);
const string& field_name = field->name();
string mutable_value_expr;
string set_value_prefix;
if (map_append) {
mutable_value_expr = StrCat("&map_", field_name);
set_value_prefix = StrCat("map_", field_name, " = ");
} else if (field->is_repeated()) {
if (field->is_map()) {
mutable_value_expr = StrCat("msg->mutable_", field_name, "()");
set_value_prefix =
"UNREACHABLE";
} else {
mutable_value_expr = StrCat("msg->add_", field_name, "()");
set_value_prefix = StrCat("msg->add_", field_name);
}
} else {
mutable_value_expr = StrCat("msg->mutable_", field_name, "()");
set_value_prefix = StrCat("msg->set_", field_name);
}
Print(i == 0 ? "" : "else ", "if (identifier == \"", field_name, "\") {");
Nest();
if (field->is_repeated()) {
CHECK(!map_append);
Print("const bool is_list = (scanner->Peek() == '[');");
Print("do {");
Nest().Print("if (is_list) {");
Nest().Print("scanner->One(Scanner::ALL);");
Print("ProtoSpaceAndComments(scanner);");
Unnest().Print("}");
} else if (field->containing_oneof() != nullptr) {
CHECK(!map_append);
const string oneof_name = field->containing_oneof()->name();
Print("if (msg->", oneof_name, "_case() != 0) return false;");
}
if (!field->is_repeated() && !map_append) {
Print("if (has_seen[", i, "]) return false;");
Print("has_seen[", i, "] = true;");
}
if (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) {
Print("const char open_char = scanner->Peek();");
Print("if (open_char != '{' && open_char != '<') return false;");
Print("scanner->One(Scanner::ALL);");
Print("ProtoSpaceAndComments(scanner);");
if (field->is_map()) {
Print("if (!ProtoParseFromScanner(");
} else {
Print("if (!", GetPackageReferencePrefix(field->message_type()->file()),
"internal::ProtoParseFromScanner(");
}
Print(" scanner, true, open_char == '{', ", mutable_value_expr,
")) return false;");
} else if (field->cpp_type() == FieldDescriptor::CPPTYPE_STRING) {
Print("string str_value;");
Print(
"if (!parsed_colon || "
"!::tensorflow::strings::ProtoParseStringLiteralFromScanner(");
Print(" scanner, &str_value)) return false;");
Print("SetProtobufStringSwapAllowed(&str_value, ", mutable_value_expr,
");");
} else if (field->cpp_type() == FieldDescriptor::CPPTYPE_ENUM) {
Print("StringPiece value;");
Print(
"if (!parsed_colon || "
"!scanner->RestartCapture().Many("
"Scanner::LETTER_DIGIT_DASH_UNDERSCORE)."
"GetResult(nullptr, &value)) return false;");
const auto* enum_d = field->enum_type();
string value_prefix;
if (enum_d->containing_type() == nullptr) {
value_prefix = GetPackageReferencePrefix(enum_d->file());
} else {
value_prefix = StrCat(GetQualifiedName(*enum_d), "_");
}
for (int enum_i = 0; enum_i < enum_d->value_count(); ++enum_i) {
const auto* value_d = enum_d->value(enum_i);
const string& value_name = value_d->name();
string condition = StrCat("value == \"", value_name, "\"");
Print(enum_i == 0 ? "" : "} else ", "if (", condition, ") {");
Nest();
Print(set_value_prefix, "(", value_prefix, value_name, ");");
Unnest();
}
Print("} else {");
Nest();
Print("int32 int_value;");
Print("if (strings::SafeStringToNumeric(value, &int_value)) {");
Nest();
Print(set_value_prefix, "(static_cast<", GetQualifiedName(*enum_d),
">(int_value));");
Unnest();
Print("} else {").Nest().Print("return false;").Unnest().Print("}");
Unnest().Print("}");
} else {
Print(field->cpp_type_name(), " value;");
switch (field->cpp_type()) {
case FieldDescriptor::CPPTYPE_INT32:
case FieldDescriptor::CPPTYPE_INT64:
case FieldDescriptor::CPPTYPE_UINT32:
case FieldDescriptor::CPPTYPE_UINT64:
case FieldDescriptor::CPPTYPE_DOUBLE:
case FieldDescriptor::CPPTYPE_FLOAT:
Print(
"if (!parsed_colon || "
"!::tensorflow::strings::ProtoParseNumericFromScanner(",
"scanner, &value)) return false;");
break;
case FieldDescriptor::CPPTYPE_BOOL:
Print(
"if (!parsed_colon || "
"!::tensorflow::strings::ProtoParseBoolFromScanner(",
"scanner, &value)) return false;");
break;
default:
LOG(FATAL) << "handled earlier";
}
Print(set_value_prefix, "(value);");
}
if (field->is_repeated()) {
Unnest().Print("} while (is_list && scanner->Peek() == ',');");
Print(
"if (is_list && "
"!scanner->OneLiteral(\"]\").GetResult()) return false;");
}
if (map_append) {
Print("set_map_", field_name, " = true;");
}
Unnest().Print("}");
}
Unnest().Print("}");
Unnest().Print("}");
Unnest().Print();
if (map_append) {
Print("}
}
Print("}
}
void Generator::AppendDebugStringFunctions(const Descriptor& md) {
SetOutput(&header_impl_).Print();
SetOutput(&header_).Print().Print("// Message-text conversion for ",
string(md.full_name()));
for (int short_pass = 0; short_pass < 2; ++short_pass) {
const bool short_debug = (short_pass == 1);
const string sig = StrCat(
"string ", short_debug ? "ProtoShortDebugString" : "ProtoDebugString",
"(\n const ", GetQualifiedName(md), "& msg)");
SetOutput(&header_).Print(sig, ";");
SetOutput(&cc_);
Print().Print(sig, " {").Nest();
Print("string s;");
Print("::tensorflow::strings::ProtoTextOutput o(&s, ",
short_debug ? "true" : "false", ");");
Print("internal::AppendProtoDebugString(&o, msg);");
Print("o.CloseTopMessage();");
Print("return s;");
Unnest().Print("}");
}
const string sig =
StrCat("void AppendProtoDebugString(\n",
" ::tensorflow::strings::ProtoTextOutput* o,\n const ",
GetQualifiedName(md), "& msg)");
SetOutput(&header_impl_).Print(sig, ";");
SetOutput(&cc_);
Print().Print("namespace internal {").Print();
Print(sig, " {").Nest();
std::vector<const FieldDescriptor*> fields;
fields.reserve(md.field_count());
for (int i = 0; i < md.field_count(); ++i) {
fields.push_back(md.field(i));
}
std::sort(fields.begin(), fields.end(),
[](const FieldDescriptor* left, const FieldDescriptor* right) {
return left->number() < right->number();
});
for (const FieldDescriptor* field : fields) {
SetOutput(&cc_);
AppendFieldAppend(*field);
}
Unnest().Print("}").Print().Print("}
}
void Generator::AppendMessageFunctions(const Descriptor& md) {
if (md.options().map_entry()) {
AppendParseMessageFunction(md);
return;
}
for (int i = 0; i < md.enum_type_count(); ++i) {
AppendEnumFunctions(*md.enum_type(i));
}
for (int i = 0; i < md.nested_type_count(); ++i) {
AppendMessageFunctions(*md.nested_type(i));
}
AppendDebugStringFunctions(md);
AppendParseMessageFunction(md);
}
void Generator::AddNamespaceToCurrentSection(const string& package, bool open) {
Print();
std::vector<string> parts = {""};
for (size_t i = 0; i < package.size(); ++i) {
if (package[i] == '.') {
parts.resize(parts.size() + 1);
} else {
parts.back() += package[i];
}
}
if (open) {
for (const auto& p : parts) {
Print("namespace ", p, " {");
}
} else {
for (auto it = parts.rbegin(); it != parts.rend(); ++it) {
Print("}
}
}
}
void Generator::AddHeadersToCurrentSection(const std::vector<string>& headers) {
std::vector<string> sorted = headers;
std::sort(sorted.begin(), sorted.end());
for (const auto& h : sorted) {
Print("#include \"", h, "\"");
}
}
void GetAllFileDescriptorsFromFile(const FileDescriptor* fd,
std::set<const FileDescriptor*>* all_fd,
std::set<const Descriptor*>* all_d);
void GetAllFileDescriptorsFromMessage(const Descriptor* d,
std::set<const FileDescriptor*>* all_fd,
std::set<const Descriptor*>* all_d) {
if (!all_d->insert(d).second) return;
GetAllFileDescriptorsFromFile(d->file(), all_fd, all_d);
for (int i = 0; i < d->field_count(); ++i) {
auto* f = d->field(i);
switch (f->cpp_type()) {
case FieldDescriptor::CPPTYPE_INT32:
case FieldDescriptor::CPPTYPE_INT64:
case FieldDescriptor::CPPTYPE_UINT32:
case FieldDescriptor::CPPTYPE_UINT64:
case FieldDescriptor::CPPTYPE_DOUBLE:
case FieldDescriptor::CPPTYPE_FLOAT:
case FieldDescriptor::CPPTYPE_BOOL:
case FieldDescriptor::CPPTYPE_STRING:
break;
case FieldDescriptor::CPPTYPE_MESSAGE:
GetAllFileDescriptorsFromMessage(f->message_type(), all_fd, all_d);
break;
case FieldDescriptor::CPPTYPE_ENUM:
GetAllFileDescriptorsFromFile(f->enum_type()->file(), all_fd, all_d);
break;
}
}
for (int i = 0; i < d->nested_type_count(); ++i) {
GetAllFileDescriptorsFromMessage(d->nested_type(i), all_fd, all_d);
}
}
void GetAllFileDescriptorsFromFile(const FileDescriptor* fd,
std::set<const FileDescriptor*>* all_fd,
std::set<const Descriptor*>* all_d) {
if (!all_fd->insert(fd).second) return;
for (int i = 0; i < fd->message_type_count(); ++i) {
GetAllFileDescriptorsFromMessage(fd->message_type(i), all_fd, all_d);
}
}
void Generator::Generate(const FileDescriptor& fd) {
const string package = fd.package();
std::set<const FileDescriptor*> all_fd;
std::set<const Descriptor*> all_d;
GetAllFileDescriptorsFromFile(&fd, &all_fd, &all_d);
std::vector<string> headers;
SetOutput(&header_);
Print("
Print("#ifndef ", GetHeaderGuard(fd, false ));
Print("#define ", GetHeaderGuard(fd, false ));
Print();
headers = {
GetProtoHeaderName(fd),
StrCat(tf_header_prefix_, "tensorflow/core/platform/macros.h"),
StrCat(tf_header_prefix_, "tensorflow/core/platform/protobuf.h"),
StrCat(tf_header_prefix_, "tensorflow/core/platform/types.h"),
};
for (const auto& h : headers) {
Print("#include \"", h, "\"");
}
AddNamespaceToCurrentSection(package, true );
SetOutput(&header_impl_);
Print("
Print("#ifndef ", GetHeaderGuard(fd, true ));
Print("#define ", GetHeaderGuard(fd, true ));
Print();
headers = {
GetProtoTextHeaderName(fd, false ),
StrCat(tf_header_prefix_,
"tensorflow/core/lib/strings/proto_text_util.h"),
StrCat(tf_header_prefix_, "tensorflow/core/lib/strings/scanner.h"),
};
for (const FileDescriptor* d : all_fd) {
if (d != &fd) {
headers.push_back(GetProtoTextHeaderName(*d, true ));
}
headers.push_back(GetProtoHeaderName(*d));
}
AddHeadersToCurrentSection(headers);
AddNamespaceToCurrentSection(package, true );
SetOutput(&header_impl_).Print().Print("namespace internal {");
SetOutput(&cc_);
Print("
Print();
Print("#include <algorithm>");
Print();
headers = {GetProtoTextHeaderName(fd, true )};
AddHeadersToCurrentSection(headers);
Print();
Print("using ::tensorflow::strings::ProtoSpaceAndComments;");
Print("using ::tensorflow::strings::Scanner;");
Print("using ::tensorflow::strings::StrCat;");
AddNamespaceToCurrentSection(package, true );
for (int i = 0; i < fd.enum_type_count(); ++i) {
AppendEnumFunctions(*fd.enum_type(i));
}
for (int i = 0; i < fd.message_type_count(); ++i) {
AppendMessageFunctions(*fd.message_type(i));
}
SetOutput(&header_);
AddNamespaceToCurrentSection(package, false );
Print().Print("#endif | #include "tensorflow/tools/proto_text/gen_proto_text_functions_lib.h"
#include <string>
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/tools/proto_text/test.pb_text.h"
#include "tensorflow/tools/proto_text/test.pb.h"
namespace tensorflow {
namespace test {
namespace {
std::string PrintShortTextFormat(const tensorflow::protobuf::Message& message) {
std::string message_short_text;
protobuf::TextFormat::Printer printer;
printer.SetSingleLineMode(true);
printer.SetExpandAny(true);
printer.PrintToString(message, &message_short_text);
if (!message_short_text.empty() &&
message_short_text[message_short_text.size() - 1] == ' ') {
message_short_text.resize(message_short_text.size() - 1);
}
return message_short_text;
}
std::string PrintTextFormat(const tensorflow::protobuf::Message& message) {
std::string message_text;
protobuf::TextFormat::Printer printer;
printer.SetExpandAny(true);
printer.PrintToString(message, &message_text);
return message_text;
}
template <typename T>
T RoundtripParseProtoOrDie(const T& input, bool short_text) {
const string s =
short_text ? PrintShortTextFormat(input) : PrintTextFormat(input);
T t;
EXPECT_TRUE(ProtoParseFromString(s, &t)) << "Failed to parse " << s;
return t;
}
#define EXPECT_TEXT_TRANSFORMS_MATCH() \
EXPECT_EQ(PrintTextFormat(proto), ProtoDebugString(proto)); \
EXPECT_EQ(PrintShortTextFormat(proto), ProtoShortDebugString(proto)); \
EXPECT_EQ(proto.DebugString(), \
RoundtripParseProtoOrDie(proto, true).DebugString()); \
EXPECT_EQ(proto.DebugString(), \
RoundtripParseProtoOrDie(proto, false).DebugString());
#define EXPECT_PARSE_FAILURE(str) \
EXPECT_FALSE(ProtoParseFromString(str, &proto)); \
EXPECT_FALSE(protobuf::TextFormat::ParseFromString(str, &proto))
#define EXPECT_PARSE_SUCCESS(expected, str) \
do { \
EXPECT_TRUE(ProtoParseFromString(str, &proto)); \
string proto_text_str = ProtoShortDebugString(proto); \
EXPECT_TRUE(protobuf::TextFormat::ParseFromString(str, &proto)); \
string protobuf_str = ProtoShortDebugString(proto); \
EXPECT_EQ(proto_text_str, protobuf_str); \
EXPECT_EQ(expected, proto_text_str); \
} while (false)
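// Note: the macros above assume a local TestAllTypes variable named "proto".
// Every successful parse is cross-checked against protobuf's own TextFormat
// parser, so the generated proto-text code must agree with the reference
// implementation byte for byte.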
TEST(CreateProtoDebugStringLibTest, ValidSimpleTypes) {
TestAllTypes proto;
proto.Clear();
proto.set_optional_int32(-1);
proto.set_optional_int64(-2);
proto.set_optional_uint32(3);
proto.set_optional_uint64(4);
proto.set_optional_sint32(-5);
proto.set_optional_sint64(-6);
proto.set_optional_fixed32(-7);
proto.set_optional_fixed64(-8);
proto.set_optional_sfixed32(-9);
proto.set_optional_sfixed64(-10);
proto.set_optional_float(-12.34);
proto.set_optional_double(-5.678);
proto.set_optional_bool(true);
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.set_optional_int32(std::numeric_limits<int32>::max());
proto.set_optional_int64(std::numeric_limits<protobuf_int64>::max());
proto.set_optional_uint32(std::numeric_limits<uint32>::max());
proto.set_optional_uint64(std::numeric_limits<uint64>::max());
proto.set_optional_double(std::numeric_limits<double>::max());
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.set_optional_double(std::numeric_limits<double>::min());
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.set_optional_int32(std::numeric_limits<int32>::lowest());
proto.set_optional_int64(std::numeric_limits<protobuf_int64>::lowest());
proto.set_optional_double(std::numeric_limits<double>::lowest());
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.set_optional_double(std::numeric_limits<double>::infinity());
proto.set_optional_float(std::numeric_limits<float>::infinity());
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.set_optional_double(-1 * std::numeric_limits<double>::infinity());
proto.set_optional_float(-1 * std::numeric_limits<float>::infinity());
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
for (int i = 0; i < 256; ++i) {
proto.mutable_optional_string()->push_back(static_cast<char>(i));
proto.mutable_optional_bytes()->push_back(static_cast<char>(i));
}
strings::StrAppend(proto.mutable_optional_string(), "¢€𐍈");
proto.set_optional_cord(proto.optional_string());
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.add_repeated_int32(-1);
proto.add_repeated_int32(0);
proto.add_repeated_int64(0);
proto.add_repeated_int64(1);
proto.add_repeated_uint32(-10);
proto.add_repeated_uint32(0);
proto.add_repeated_uint32(10);
proto.add_repeated_uint64(-20);
proto.add_repeated_uint64(0);
proto.add_repeated_uint64(20);
proto.add_repeated_sint32(-30);
proto.add_repeated_sint32(0);
proto.add_repeated_sint32(30);
proto.add_repeated_sint64(-40);
proto.add_repeated_sint64(0);
proto.add_repeated_sint64(40);
proto.add_repeated_fixed32(-50);
proto.add_repeated_fixed32(0);
proto.add_repeated_fixed32(50);
proto.add_repeated_fixed64(-60);
proto.add_repeated_fixed64(0);
proto.add_repeated_fixed64(60);
proto.add_repeated_sfixed32(-70);
proto.add_repeated_sfixed32(0);
proto.add_repeated_sfixed32(70);
proto.add_repeated_sfixed64(-80);
proto.add_repeated_sfixed64(0);
proto.add_repeated_sfixed64(80);
proto.add_repeated_float(-1.2345);
proto.add_repeated_float(0);
proto.add_repeated_float(-2.3456);
proto.add_repeated_double(-10.2345);
proto.add_repeated_double(0);
proto.add_repeated_double(-20.3456);
proto.add_repeated_bool(false);
proto.add_repeated_bool(true);
proto.add_repeated_bool(false);
proto.add_repeated_string("abc");
proto.add_repeated_string("");
proto.add_repeated_string("def");
proto.add_repeated_cord("abc");
proto.add_repeated_cord("");
proto.add_repeated_cord("def");
proto.add_packed_repeated_int64(-1000);
proto.add_packed_repeated_int64(0);
proto.add_packed_repeated_int64(1000);
EXPECT_TEXT_TRANSFORMS_MATCH();
EXPECT_PARSE_SUCCESS("repeated_int32: 1 repeated_int32: 2 repeated_int32: 3",
"repeated_int32: [1, 2 , 3]");
EXPECT_PARSE_SUCCESS(("repeated_bool: false repeated_bool: false "
"repeated_bool: true repeated_bool: true "
"repeated_bool: false repeated_bool: true"),
"repeated_bool: [false, 0, 1, true, False, True]");
EXPECT_PARSE_SUCCESS(("repeated_string: \"a,b\" "
"repeated_string: \"cdef\""),
"repeated_string: [ 'a,b', 'cdef' ] ");
EXPECT_PARSE_SUCCESS("optional_string: \"123\\\" \\'xyz\"",
"optional_string: '123\\\" \\'xyz' ");
EXPECT_PARSE_SUCCESS("optional_double: 10000", "optional_double: 1e4");
EXPECT_PARSE_FAILURE("optional_string: '1' optional_string: '2'");
EXPECT_PARSE_FAILURE("optional_double: 123 optional_double: 456");
EXPECT_PARSE_FAILURE("optional_double: 0001");
EXPECT_PARSE_FAILURE("optional_double: 000.1");
EXPECT_PARSE_FAILURE("optional_double: a");
EXPECT_PARSE_FAILURE("optional_double: x123");
EXPECT_PARSE_FAILURE("optional_double: '123'");
EXPECT_PARSE_FAILURE("optional_double: --111");
EXPECT_PARSE_FAILURE("optional_string: 'abc\"");
EXPECT_PARSE_FAILURE("optional_bool: truE");
EXPECT_PARSE_FAILURE("optional_bool: FALSE");
}
TEST(CreateProtoDebugStringLibTest, NestedMessages) {
TestAllTypes proto;
proto.Clear();
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.mutable_optional_nested_message();
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.mutable_optional_foreign_message();
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.mutable_optional_nested_message();
proto.mutable_optional_foreign_message();
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.mutable_optional_nested_message()->set_optional_int32(1);
proto.mutable_optional_foreign_message()->set_c(-1234);
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.mutable_optional_nested_message()->set_optional_int32(1234);
proto.mutable_optional_nested_message()
->mutable_msg();
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.mutable_optional_nested_message()->set_optional_int32(1234);
proto.mutable_optional_nested_message()->mutable_msg()->set_optional_string(
"abc");
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.mutable_optional_nested_message()->mutable_msg()->set_optional_string(
"abc");
proto.mutable_optional_nested_message()->set_optional_int64(1234);
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
auto* nested = proto.add_repeated_nested_message();
nested = proto.add_repeated_nested_message();
nested->set_optional_int32(123);
nested->mutable_msg();
nested = proto.add_repeated_nested_message();
nested->mutable_msg();
nested->mutable_msg()->set_optional_string("abc");
nested->set_optional_int64(1234);
EXPECT_TEXT_TRANSFORMS_MATCH();
EXPECT_PARSE_SUCCESS("optional_nested_message { optional_int32: 123 }",
"optional_nested_message: < optional_int32: 123 >");
EXPECT_PARSE_FAILURE("optional_nested_message: < optional_int32: 123 }");
EXPECT_PARSE_FAILURE("optional_nested_message: { optional_int32: 123 >");
EXPECT_PARSE_SUCCESS("optional_nested_message { optional_int32: 123 }",
"optional_nested_message < optional_int32: 123 >");
EXPECT_PARSE_SUCCESS("optional_nested_message { optional_int32: 123 }",
"optional_nested_message{ optional_int32: 123 } ");
EXPECT_PARSE_SUCCESS(
("repeated_nested_message { } "
"repeated_nested_message { optional_int32: 123 }"),
"repeated_nested_message: [ { }, { optional_int32: 123 } ]");
EXPECT_PARSE_SUCCESS(
("repeated_nested_message { } "
"repeated_nested_message { optional_int32: 123 }"),
"repeated_nested_message [ { }, { optional_int32: 123 } ]");
EXPECT_PARSE_SUCCESS(
("repeated_nested_message { } "
"repeated_nested_message { optional_int32: 123 } "
"repeated_nested_message { optional_int32: 456 }"),
("repeated_nested_message [ { }, { optional_int32: 123 } ]"
"repeated_nested_message [ { optional_int32: 456 } ]"));
EXPECT_PARSE_FAILURE("optional_nested_message: {optional_int32: 'abc' }");
EXPECT_PARSE_FAILURE(
("optional_nested_message { optional_int32: 123 } "
"optional_nested_message { optional_int64: 456 }"));
}
TEST(CreateProtoDebugStringLibTest, RecursiveMessage) {
NestedTestAllTypes proto;
NestedTestAllTypes* cur = &proto;
for (int depth = 0; depth < 20; ++depth) {
cur->mutable_payload()->set_optional_int32(1000 + depth);
cur = cur->mutable_child();
}
EXPECT_TEXT_TRANSFORMS_MATCH();
}
template <typename T>
T ParseProto(const string& value_text_proto) {
T value;
EXPECT_TRUE(protobuf::TextFormat::ParseFromString(value_text_proto, &value))
<< value_text_proto;
return value;
}
TestAllTypes::NestedMessage ParseNestedMessage(const string& value_text_proto) {
return ParseProto<TestAllTypes::NestedMessage>(value_text_proto);
}
TEST(CreateProtoDebugStringLibTest, Map) {
TestAllTypes proto;
std::vector<TestAllTypes::NestedMessage> msg_values;
msg_values.push_back(ParseNestedMessage("optional_int32: 345"));
msg_values.push_back(ParseNestedMessage("optional_int32: 123"));
msg_values.push_back(ParseNestedMessage("optional_int32: 234"));
msg_values.push_back(ParseNestedMessage("optional_int32: 0"));
proto.Clear();
{
auto& map = *proto.mutable_map_string_to_message();
map["def"] = msg_values[0];
map["abc"] = msg_values[1];
map["cde"] = msg_values[2];
map[""] = msg_values[3];
}
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
{
auto& map = *proto.mutable_map_int32_to_message();
map[20] = msg_values[0];
map[10] = msg_values[1];
map[15] = msg_values[2];
map[0] = msg_values[3];
}
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
{
auto& map = *proto.mutable_map_int64_to_message();
map[20] = msg_values[0];
map[10] = msg_values[1];
map[15] = msg_values[2];
map[0] = msg_values[3];
}
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
{
auto& map = *proto.mutable_map_int64_to_message();
map[true] = msg_values[0];
map[false] = msg_values[1];
}
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
{
auto& map = *proto.mutable_map_string_to_int64();
map["def"] = 0;
map["abc"] = std::numeric_limits<protobuf_int64>::max();
map[""] = 20;
}
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
{
auto& map = *proto.mutable_map_int64_to_string();
map[0] = "def";
map[std::numeric_limits<protobuf_int64>::max()] = "";
map[20] = "abc";
}
EXPECT_TEXT_TRANSFORMS_MATCH();
EXPECT_PARSE_SUCCESS(("map_string_to_int64 { key: \"abc\" value: 5 } "
"map_string_to_int64 { key: \"def\" value: 2 } "
"map_string_to_int64 { key: \"ghi\" value: 4 }"),
("map_string_to_int64: { key: 'abc' value: 1 } "
"map_string_to_int64: { key: 'def' value: 2 } "
"map_string_to_int64: { key: 'ghi' value: 3 } "
"map_string_to_int64: { key: 'ghi' value: 4 } "
"map_string_to_int64: { key: 'abc' value: 5 } "));
}
TEST(CreateProtoDebugStringLibTest, Enums) {
TestAllTypes proto;
proto.Clear();
proto.set_optional_nested_enum(TestAllTypes::ZERO);
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.set_optional_nested_enum(TestAllTypes::FOO);
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.add_repeated_nested_enum(TestAllTypes::FOO);
proto.add_repeated_nested_enum(TestAllTypes::ZERO);
proto.add_repeated_nested_enum(TestAllTypes::BAR);
proto.add_repeated_nested_enum(TestAllTypes::NEG);
proto.add_repeated_nested_enum(TestAllTypes::ZERO);
proto.set_optional_foreign_enum(ForeignEnum::FOREIGN_BAR);
EXPECT_TEXT_TRANSFORMS_MATCH();
EXPECT_PARSE_SUCCESS(
"optional_nested_enum: BAR "
"repeated_nested_enum: BAR "
"repeated_nested_enum: ZERO "
"repeated_nested_enum: FOO",
("repeated_nested_enum: 2 "
"repeated_nested_enum: 0 "
"optional_nested_enum: 2 "
"repeated_nested_enum: 1"));
EXPECT_PARSE_SUCCESS("", "optional_nested_enum: -0");
EXPECT_PARSE_FAILURE("optional_nested_enum: 2147483648");
EXPECT_PARSE_FAILURE("optional_nested_enum: BARNONE");
EXPECT_PARSE_FAILURE("optional_nested_enum: 'BAR'");
EXPECT_PARSE_FAILURE("optional_nested_enum: \"BAR\" ");
EXPECT_EQ(string("BAR"),
string(EnumName_TestAllTypes_NestedEnum(TestAllTypes::BAR)));
EXPECT_EQ(string(""), string(EnumName_TestAllTypes_NestedEnum(
static_cast<TestAllTypes_NestedEnum>(123))));
}
TEST(CreateProtoDebugStringLibTest, Oneof) {
TestAllTypes proto;
proto.Clear();
proto.set_oneof_string("abc");
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.set_oneof_string("");
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.set_oneof_string("abc");
proto.set_oneof_uint32(123);
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.set_oneof_uint32(0);
EXPECT_TEXT_TRANSFORMS_MATCH();
proto.Clear();
proto.set_oneof_enum(TestAllTypes::ZERO);
EXPECT_TEXT_TRANSFORMS_MATCH();
EXPECT_PARSE_FAILURE("oneof_string: \"abc\" oneof_uint32: 13 ");
EXPECT_PARSE_FAILURE("oneof_string: \"abc\" oneof_string: \"def\" ");
}
TEST(CreateProtoDebugStringLibTest, Comments) {
TestAllTypes proto;
EXPECT_PARSE_SUCCESS("optional_int64: 123 optional_string: \"#text\"",
("#leading comment \n"
"optional_int64# comment\n"
":# comment\n"
"123# comment\n"
"optional_string # comment\n"
": # comment\n"
"\"#text\"#comment####\n"));
EXPECT_PARSE_FAILURE("optional_int64:
EXPECT_PARSE_FAILURE("optional_int64:\n123");
}
}
}
} |
1,277 | cpp | tensorflow/tensorflow | transform_utils | tensorflow/tools/graph_transforms/transform_utils.cc | tensorflow/tools/graph_transforms/transform_utils_test.cc | #ifndef TENSORFLOW_TOOLS_GRAPH_TRANSFORMS_TRANSFORM_UTILS_H_
#define TENSORFLOW_TOOLS_GRAPH_TRANSFORMS_TRANSFORM_UTILS_H_
#include <set>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace graph_transforms {
void MapNamesToNodes(const GraphDef& graph_def,
std::map<string, const NodeDef*>* result);
void MapNodesToOutputs(const GraphDef& graph_def,
std::map<string, std::vector<const NodeDef*>>* result);
void NodeNamePartsFromInput(const string& input_name, string* prefix,
string* node_name, string* suffix);
string CanonicalInputName(const string& input_name);
string NodeNameFromInput(const string& input_name);
uint64 HashNodeDef(const NodeDef& node);
void AddNodeInput(const string& input_name, NodeDef* node);
void CopyNodeAttr(const NodeDef& source, const string& source_key,
const string& dest_key, NodeDef* dest);
template <class T>
inline void SetNodeAttr(const string& key, const T& value, NodeDef* node) {
AttrValue attr_value;
SetAttrValue(value, &attr_value);
auto* attr_map = node->mutable_attr();
(*attr_map)[key] = attr_value;
}
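// Illustrative usage (node and attribute names here are made up):
//   NodeDef node;
//   node.set_name("example_const");
//   SetNodeAttr("dtype", DT_FLOAT, &node);
//   SetNodeAttr("N", 42, &node);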
template <class T>
inline void SetNodeTensorAttr(const string& key, const Tensor& tensor,
NodeDef* node) {
TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
SetNodeAttr(key, tensor_proto, node);
}
template <class T>
inline void SetNodeTensorAttr(const string& key, const TensorShape& shape,
const std::vector<T>& values, NodeDef* node) {
const DataType dtype = DataTypeToEnum<T>::v();
CHECK_EQ(shape.num_elements(), values.size());
Tensor tensor(dtype, shape);
T* dest_data = tensor.flat<T>().data();
std::copy_n(values.data(), values.size(), dest_data);
SetNodeTensorAttr<T>(key, tensor, node);
}
Tensor GetNodeTensorAttr(const NodeDef& node, const string& key);
void FilterGraphDef(const GraphDef& input_graph_def,
std::function<bool(const NodeDef&)> selector,
GraphDef* output_graph_def);
void RemoveAttributes(const GraphDef& input_graph_def,
const std::vector<string>& attributes,
GraphDef* output_graph_def);
Status SortByExecutionOrder(const GraphDef& input_graph_def,
GraphDef* output_graph_def);
void FindInvalidInputs(const GraphDef& graph_def,
std::vector<std::pair<string, string>>* invalid_inputs);
Status IsGraphValid(const GraphDef& graph_def);
Status GetInOutTypes(const NodeDef& node_def, DataTypeVector* inputs,
DataTypeVector* outputs);
Status TensorShapeFromString(const string& shape_string, TensorShape* result);
struct OpTypePattern {
string op;
std::vector<OpTypePattern> inputs;
string DebugString() const;
};
struct NodeMatch {
NodeMatch() : node() {}
NodeDef node;
std::vector<NodeMatch> inputs;
string DebugString() const;
};
class GraphMatcher {
public:
GraphMatcher(const GraphDef& graph_def);
Status GetOpTypeMatches(const OpTypePattern& pattern,
std::vector<NodeMatch>* matches);
private:
bool DoesOpTypeMatch(const NodeDef& node, const OpTypePattern& pattern,
const std::set<string>& previously_matched_nodes,
NodeMatch* match);
GraphDef graph_def_;
std::map<string, const NodeDef*> node_map_;
};
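// Illustrative usage, assuming a GraphDef containing a Conv2D fed by a Const
// ("*" matches any op):
//   GraphMatcher matcher(graph_def);
//   std::vector<NodeMatch> matches;
//   OpTypePattern pattern = {"Conv2D", {{"*"}, {"Const"}}};
//   TF_RETURN_IF_ERROR(matcher.GetOpTypeMatches(pattern, &matches));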
struct ReplaceMatchingOpTypesOptions {
bool allow_inconsistencies;
};
Status ReplaceMatchingOpTypes(
const GraphDef& input_graph_def, const OpTypePattern& pattern,
const std::function<Status(const NodeMatch&, const std::set<string>&,
const std::set<string>&, std::vector<NodeDef>*)>&
node_generator,
const ReplaceMatchingOpTypesOptions& options, GraphDef* output_graph_def);
void MatchedNodesAsArray(const NodeMatch& match, std::vector<NodeDef>* result);
Status RenameNodeInputs(const GraphDef& input_graph_def,
const std::map<string, string>& inputs_to_rename,
const std::unordered_set<string>& nodes_to_ignore,
GraphDef* output_graph_def);
void CopyOriginalMatch(const NodeMatch& match, std::vector<NodeDef>* new_nodes);
typedef std::map<string, std::vector<string>> TransformFuncParameters;
struct TransformFuncContext {
std::vector<string> input_names;
std::vector<string> output_names;
TransformFuncParameters params;
int CountParameters(const string& name) const;
Status GetOneStringParameter(const string& name, const string& default_value,
string* result) const;
Status GetOneInt32Parameter(const string& name, int32_t default_value,
int32* result) const;
Status GetOneInt64Parameter(const string& name, int64_t default_value,
int64_t* result) const;
Status GetOneFloatParameter(const string& name, float default_value,
float* result) const;
Status GetOneBoolParameter(const string& name, bool default_value,
bool* result) const;
};
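// Usage sketch (illustrative): reading a hypothetical "width" parameter inside
// a transform implementation.
//
//   int32_t width;
//   TF_RETURN_IF_ERROR(context.GetOneInt32Parameter("width", 224, &width));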
typedef std::function<Status(const GraphDef&,
const TransformFuncContext& context, GraphDef*)>
TransformFunc;
typedef std::map<string, TransformFunc> TransformRegistry;
TransformRegistry* GetTransformRegistry();
class TransformRegistrar {
public:
TransformRegistrar(const string& name, TransformFunc transform_func) {
TransformRegistry* transform_registry = GetTransformRegistry();
(*transform_registry)[name] = transform_func;
}
};
#define REGISTER_GRAPH_TRANSFORM(name, func) \
REGISTER_GRAPH_TRANSFORM_UNIQ_HELPER(__COUNTER__, name, func)
#define REGISTER_GRAPH_TRANSFORM_UNIQ_HELPER(ctr, name, func) \
REGISTER_GRAPH_TRANSFORM_UNIQ(ctr, name, func)
#define REGISTER_GRAPH_TRANSFORM_UNIQ(ctr, name, func) \
static tensorflow::graph_transforms::TransformRegistrar \
registrar__body__##ctr##__object(name, func);
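// Registration sketch (illustrative; "my_transform" and MyTransform are
// hypothetical names):
//
//   Status MyTransform(const GraphDef& input_graph_def,
//                      const TransformFuncContext& context,
//                      GraphDef* output_graph_def);
//   REGISTER_GRAPH_TRANSFORM("my_transform", MyTransform);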
}
}
#endif
#include "tensorflow/tools/graph_transforms/transform_utils.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace graph_transforms {
namespace {
inline bool IsMerge(const NodeDef& node_def) {
return node_def.op() == "Merge" || node_def.op() == "RefMerge" ||
node_def.op() == "_XlaMerge";
}
void RecordMatchedNodes(const NodeMatch& match,
std::set<string>* matched_nodes) {
matched_nodes->insert(match.node.name());
for (const NodeMatch& input_match : match.inputs) {
RecordMatchedNodes(input_match, matched_nodes);
}
}
inline uint64 Hash64String(const string& input) {
return Hash64(input.data(), input.size());
}
}
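// Flattens a NodeMatch tree into a vector in breadth-first order, emitting each
// node at most once even when it appears in several branches of the match.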
void MatchedNodesAsArray(const NodeMatch& match, std::vector<NodeDef>* result) {
std::set<string> found_nodes;
std::vector<NodeMatch> current_matches = {match};
while (!current_matches.empty()) {
std::vector<NodeMatch> next_matches;
for (const NodeMatch& current_match : current_matches) {
if (found_nodes.count(current_match.node.name())) {
continue;
}
found_nodes.insert(current_match.node.name());
result->push_back(current_match.node);
for (const NodeMatch& input_match : current_match.inputs) {
next_matches.push_back(input_match);
}
}
current_matches = next_matches;
}
}
void MapNamesToNodes(const GraphDef& graph_def,
std::map<string, const NodeDef*>* result) {
for (const NodeDef& node : graph_def.node()) {
(*result)[node.name()] = &node;
}
}
void MapNodesToOutputs(const GraphDef& graph_def,
std::map<string, std::vector<const NodeDef*>>* result) {
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(graph_def, &node_map);
for (const NodeDef& node : graph_def.node()) {
for (const string& input : node.input()) {
string input_node_name = NodeNameFromInput(input);
(*result)[input_node_name].push_back(&node);
}
}
}
void NodeNamePartsFromInput(const string& input_name, string* prefix,
string* node_name, string* suffix) {
std::vector<string> input_parts = str_util::Split(input_name, ':');
if (input_parts.size() < 2) {
*suffix = "";
} else {
*suffix = ":" + input_parts[1];
}
StringPiece node_name_piece(input_parts[0]);
if (absl::ConsumePrefix(&node_name_piece, "^")) {
*prefix = "^";
} else {
*prefix = "";
}
*node_name = string(node_name_piece);
}
string NodeNameFromInput(const string& input_name) {
string prefix;
string node_name;
string suffix;
NodeNamePartsFromInput(input_name, &prefix, &node_name, &suffix);
return node_name;
}
string CanonicalInputName(const string& input_name) {
string prefix;
string node_name;
string suffix;
NodeNamePartsFromInput(input_name, &prefix, &node_name, &suffix);
if (suffix.empty()) {
suffix = ":0";
}
return prefix + node_name + suffix;
}
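// Canonicalization examples (from the behavior above): "foo" -> "foo:0",
// "foo:2" -> "foo:2", "^foo" -> "^foo:0".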
uint64 HashNodeDef(const NodeDef& node) {
uint64 hash = Hash64String(node.op());
hash = Hash64Combine(hash, Hash64String(node.name()));
for (const string& input : node.input()) {
hash = Hash64Combine(hash, Hash64String(CanonicalInputName(input)));
}
hash = Hash64Combine(hash, Hash64String(node.device()));
std::vector<string> attr_names;
attr_names.reserve(node.attr().size());
for (const auto& attr : node.attr()) {
attr_names.push_back(attr.first);
}
std::sort(attr_names.begin(), attr_names.end());
string attr_serialized;
for (const string& attr_name : attr_names) {
auto attr = node.attr().at(attr_name);
attr.SerializeToString(&attr_serialized);
hash = Hash64Combine(hash, Hash64String(attr_serialized));
}
return hash;
}
void AddNodeInput(const string& input_name, NodeDef* node) {
*(node->mutable_input()->Add()) = input_name;
}
void CopyNodeAttr(const NodeDef& source, const string& source_key,
const string& dest_key, NodeDef* dest) {
CHECK_NE(0, source.attr().count(source_key))
<< "No key '" << source_key << "' found in " << source.DebugString();
(*(dest->mutable_attr()))[dest_key] = source.attr().at(source_key);
}
Tensor GetNodeTensorAttr(const NodeDef& node, const string& key) {
TensorProto tensor_proto = node.attr().at(key).tensor();
Tensor tensor;
CHECK(tensor.FromProto(tensor_proto));
return tensor;
}
void FilterGraphDef(const GraphDef& input_graph_def,
std::function<bool(const NodeDef&)> selector,
GraphDef* output_graph_def) {
output_graph_def->mutable_node()->Clear();
for (const NodeDef& node : input_graph_def.node()) {
if (selector(node)) {
*output_graph_def->mutable_node()->Add() = node;
}
}
}
void RemoveAttributes(const GraphDef& input_graph_def,
const std::vector<string>& attributes,
GraphDef* output_graph_def) {
output_graph_def->mutable_node()->Clear();
for (const NodeDef& node : input_graph_def.node()) {
NodeDef* new_node = output_graph_def->mutable_node()->Add();
*new_node = node;
for (const string& attribute : attributes) {
new_node->mutable_attr()->erase(attribute);
}
}
}
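// Topologically sorts the graph using Kahn's algorithm: pending_count tracks
// each node's unresolved inputs, and nodes whose count reaches zero join the
// ready queue. Merge nodes are special-cased because they fire as soon as any
// one data input is available, so they wait only for their control edges plus
// a single input.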
Status SortByExecutionOrder(const GraphDef& input_graph_def,
GraphDef* output_graph_def) {
const int num_nodes = input_graph_def.node_size();
std::vector<int> ready;
std::vector<int> pending_count;
pending_count.reserve(num_nodes);
std::vector<gtl::InlinedVector<int, 4>> outputs(num_nodes);
std::map<string, int> name_index;
for (int i = 0; i < input_graph_def.node_size(); ++i) {
const NodeDef& node(input_graph_def.node(i));
name_index[node.name()] = i;
}
for (int n = 0; n < num_nodes; ++n) {
const NodeDef& node_def(input_graph_def.node(n));
if (IsMerge(node_def)) {
int32_t num_control_edges = 0;
for (int i = 0; i < node_def.input_size(); ++i) {
if (absl::StartsWith(node_def.input(i), "^")) {
num_control_edges++;
}
}
pending_count.push_back(num_control_edges + 1);
} else {
pending_count.push_back(node_def.input_size());
}
if (node_def.input_size() == 0) {
ready.push_back(n);
continue;
}
for (int i = 0; i < node_def.input_size(); ++i) {
const string& input_name = node_def.input(i);
const string& input_node_name = NodeNameFromInput(input_name);
if (!name_index.count(input_node_name)) {
return errors::InvalidArgument("Node '", node_def.name(),
"': Unknown input node '",
node_def.input(i), "'");
}
outputs[name_index[input_node_name]].push_back(n);
}
}
int processed = 0;
output_graph_def->Clear();
while (!ready.empty()) {
int o = ready.back();
ready.pop_back();
++processed;
const NodeDef& node_def(input_graph_def.node(o));
*output_graph_def->mutable_node()->Add() = node_def;
for (size_t i = 0; i < outputs[o].size(); ++i) {
const int output = outputs[o][i];
pending_count[output]--;
if (pending_count[output] == 0) {
ready.push_back(output);
}
}
}
if (processed < num_nodes) {
LOG(WARNING) << "IN " << __func__ << (num_nodes - processed)
<< " NODES IN A CYCLE";
for (int64_t i = 0; i < num_nodes; i++) {
if (pending_count[i] != 0) {
LOG(WARNING) << "PENDING: " << SummarizeNodeDef(input_graph_def.node(i))
<< "WITH PENDING COUNT = " << pending_count[i];
}
}
return errors::InvalidArgument(num_nodes - processed, " nodes in a cycle");
}
return OkStatus();
}
string OpTypePattern::DebugString() const {
string result = "{" + op + ", {";
for (const OpTypePattern& input : inputs) {
result += input.DebugString() + ",";
}
result += "}}";
return result;
}
string NodeMatch::DebugString() const {
string result = "{";
result += node.DebugString();
result += ", {";
for (const NodeMatch& input : inputs) {
result += input.DebugString() + ",";
}
result += "}}";
return result;
}
GraphMatcher::GraphMatcher(const GraphDef& graph_def) {
SortByExecutionOrder(graph_def, &graph_def_).IgnoreError();
MapNamesToNodes(graph_def_, &node_map_);
}
Status GraphMatcher::GetOpTypeMatches(const OpTypePattern& pattern,
std::vector<NodeMatch>* matches) {
std::set<string> matched_nodes;
for (const NodeDef& node : graph_def_.node()) {
if (matched_nodes.count(node.name())) {
continue;
}
NodeMatch match;
if (DoesOpTypeMatch(node, pattern, matched_nodes, &match)) {
RecordMatchedNodes(match, &matched_nodes);
matches->push_back(match);
}
}
return OkStatus();
}
bool GraphMatcher::DoesOpTypeMatch(
const NodeDef& node, const OpTypePattern& pattern,
const std::set<string>& previously_matched_nodes, NodeMatch* match) {
VLOG(1) << "Looking at node " << node.DebugString();
VLOG(1) << "pattern=" << pattern.DebugString();
VLOG(1) << "match=" << match->DebugString();
if (previously_matched_nodes.count(node.name())) {
VLOG(1) << "node " << node.name() << " has been previously matched";
return false;
}
bool pattern_matched = false;
if (pattern.op == "*") {
pattern_matched = true;
} else {
std::vector<string> pattern_ops = str_util::Split(pattern.op, '|');
for (const string& pattern_op : pattern_ops) {
if (node.op() == pattern_op) {
pattern_matched = true;
}
}
}
if (!pattern_matched) {
VLOG(1) << "node.op() != pattern.op()";
return false;
}
match->node = node;
std::vector<string> non_control_inputs;
for (const string& input : node.input()) {
if (!input.empty() && (input[0] != '^')) {
non_control_inputs.push_back(input);
}
}
if (pattern.inputs.empty()) {
return true;
}
if (non_control_inputs.size() != pattern.inputs.size()) {
VLOG(1) << "non_control_inputs.size() != pattern.inputs.size()";
return false;
}
for (int i = 0; i < pattern.inputs.size(); ++i) {
const string& input_node_name = NodeNameFromInput(non_control_inputs[i]);
const NodeDef& input_node = *(node_map_[input_node_name]);
const OpTypePattern& input_pattern = pattern.inputs[i];
match->inputs.push_back(NodeMatch());
NodeMatch* input_match = &(match->inputs.back());
if (!DoesOpTypeMatch(input_node, input_pattern, previously_matched_nodes,
input_match)) {
return false;
}
}
return true;
}
Status ReplaceMatchingOpTypes(
const GraphDef& input_graph_def, const OpTypePattern& pattern,
const std::function<Status(const NodeMatch&, const std::set<string>&,
const std::set<string>&, std::vector<NodeDef>*)>&
node_generator,
const ReplaceMatchingOpTypesOptions& options, GraphDef* output_graph_def) {
GraphMatcher matcher(input_graph_def);
std::vector<NodeMatch> matches;
TF_RETURN_IF_ERROR(matcher.GetOpTypeMatches(pattern, &matches));
std::set<string> matched_nodes;
std::map<string, const NodeMatch*> matches_by_head_name;
for (const NodeMatch& match : matches) {
matches_by_head_name[match.node.name()] = &match;
RecordMatchedNodes(match, &matched_nodes);
}
std::map<string, std::vector<const NodeDef*>> outputs_map;
MapNodesToOutputs(input_graph_def, &outputs_map);
output_graph_def->Clear();
for (const NodeDef& input_node : input_graph_def.node()) {
if (matches_by_head_name.count(input_node.name())) {
const NodeMatch* match = matches_by_head_name[input_node.name()];
std::vector<NodeDef> matched_nodes_array;
MatchedNodesAsArray(*match, &matched_nodes_array);
std::set<string> matched_nodes_lookup;
for (const NodeDef& matched_node : matched_nodes_array) {
matched_nodes_lookup.insert(matched_node.name());
}
std::set<string> input_nodes;
std::set<string> output_nodes;
for (const NodeDef& matched_node : matched_nodes_array) {
for (const string& input_name : matched_node.input()) {
string input_node_name = NodeNameFromInput(input_name);
if (!matched_nodes_lookup.count(input_node_name)) {
input_nodes.insert(matched_node.name());
}
}
if (outputs_map.count(matched_node.name())) {
for (const NodeDef* dependent_node :
outputs_map[matched_node.name()]) {
if (!matched_nodes_lookup.count(dependent_node->name())) {
output_nodes.insert(matched_node.name());
}
}
}
}
std::vector<NodeDef> new_nodes;
TF_RETURN_IF_ERROR(
node_generator(*match, input_nodes, output_nodes, &new_nodes));
std::set<string> new_node_names;
for (const NodeDef& new_node : new_nodes) {
new_node_names.insert(new_node.name());
}
bool abort_replacement = false;
if (!options.allow_inconsistencies) {
for (const string& expected_output : output_nodes) {
if (!new_node_names.count(expected_output)) {
LOG(WARNING) << "Expected " << expected_output
<< " to be preserved.";
abort_replacement = true;
}
}
}
if (abort_replacement) {
LOG(WARNING) << "Generator function didn't preserve needed nodes, "
<< "copying old replacements back in instead.";
std::vector<NodeDef> old_nodes;
MatchedNodesAsArray(*match, &old_nodes);
for (const NodeDef& old_node : old_nodes) {
NodeDef* added_node = output_graph_def->mutable_node()->Add();
*added_node = old_node;
}
} else {
for (const NodeDef& new_node : new_nodes) {
NodeDef* added_node = output_graph_def->mutable_node()->Add();
*added_node = new_node;
}
}
} else if (!matched_nodes.count(input_node.name())) {
NodeDef* added_node = output_graph_def->mutable_node()->Add();
*added_node = input_node;
    } else {
      // This node is part of a match but is not its head: it was already
      // handled (replaced or copied back) when the head was processed, so it
      // is intentionally dropped here.
    }
}
return OkStatus();
}
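// Usage sketch (illustrative): a trivial node_generator that copies the matched
// Add node back unchanged. Unless options.allow_inconsistencies is set, the
// generated nodes must keep the names listed in output_nodes.
//
//   GraphDef out_graph;
//   TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
//       in_graph, {"Add"},
//       [](const NodeMatch& match, const std::set<string>& input_nodes,
//          const std::set<string>& output_nodes,
//          std::vector<NodeDef>* new_nodes) {
//         new_nodes->push_back(match.node);
//         return OkStatus();
//       },
//       {}, &out_graph));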
Status RenameNodeInputs(const GraphDef& input_graph_def,
const std::map<string, string>& inputs_to_rename,
const std::unordered_set<string>& nodes_to_ignore,
GraphDef* output_graph_def) {
std::map<string, std::vector<std::pair<string, string>>>
canonical_inputs_to_rename;
for (const auto& input_to_rename : inputs_to_rename) {
canonical_inputs_to_rename[NodeNameFromInput(input_to_rename.first)]
.push_back({input_to_rename.first, input_to_rename.second});
}
output_graph_def->Clear();
for (const NodeDef& node : input_graph_def.node()) {
NodeDef* new_node = output_graph_def->mutable_node()->Add();
*new_node = node;
new_node->mutable_input()->Clear();
for (const string& input_name : node.input()) {
std::set<string> already_visited;
string new_input_name = input_name;
while (
canonical_inputs_to_rename.count(NodeNameFromInput(new_input_name))) {
string input_node_name = NodeNameFromInput(new_input_name);
if (already_visited.count(input_node_name)) {
return errors::InvalidArgument(
"RenameNodeInputs argument contains a cycle for ",
input_node_name);
}
already_visited.insert(input_node_name);
if (nodes_to_ignore.count(node.name())) {
break;
}
bool any_match_found = false;
for (const std::pair<string, string>& input_to_rename :
canonical_inputs_to_rename.at(input_node_name)) {
const string& source_name = input_to_rename.first;
const string& dest_name = input_to_rename.second;
bool is_match;
string match_name;
if (str_util::EndsWith(source_name, ":*")) {
is_match = true;
string prefix;
string unused_node_name;
string suffix;
NodeNamePartsFromInput(new_input_name, &prefix, &unused_node_name,
&suffix);
match_name = prefix + dest_name + suffix;
} else {
is_match = (CanonicalInputName(source_name) ==
CanonicalInputName(new_input_name));
match_name = dest_name;
}
if (is_match) {
new_input_name = match_name;
any_match_found = true;
}
}
        if (!any_match_found) {
          break;
        }
      }
      *(new_node->mutable_input()->Add()) = new_input_name;
    }
  }
  return OkStatus();
} | #include "tensorflow/tools/graph_transforms/transform_utils.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace graph_transforms {
class TransformUtilsTest : public ::testing::Test {
protected:
void TestMapNamesToNodes() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 100;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Output add = Add(root.WithOpName("add"), a_const, b_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(graph_def, &node_map);
EXPECT_EQ(1, node_map.count("a"));
EXPECT_EQ(1, node_map.count("b"));
EXPECT_EQ(1, node_map.count("add"));
EXPECT_EQ(1, node_map.count("placeholder"));
EXPECT_EQ(1, node_map.count("output"));
EXPECT_EQ(0, node_map.count("no_such_node"));
}
void TestMapNodesToOutputs() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 100;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Output add = Add(root.WithOpName("add"), a_const, b_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
std::map<string, std::vector<const NodeDef*>> outputs_map;
MapNodesToOutputs(graph_def, &outputs_map);
EXPECT_EQ(1, outputs_map.count("a"));
EXPECT_EQ(1, outputs_map["a"].size());
EXPECT_EQ("add", outputs_map["a"][0]->name());
EXPECT_EQ(1, outputs_map.count("b"));
EXPECT_EQ(1, outputs_map["b"].size());
EXPECT_EQ("add", outputs_map["b"][0]->name());
EXPECT_EQ(1, outputs_map.count("add"));
EXPECT_EQ(1, outputs_map["add"].size());
EXPECT_EQ("output", outputs_map["add"][0]->name());
EXPECT_EQ(1, outputs_map.count("placeholder"));
EXPECT_EQ(1, outputs_map["placeholder"].size());
EXPECT_EQ("output", outputs_map["placeholder"][0]->name());
EXPECT_EQ(0, outputs_map.count("output"));
EXPECT_EQ(0, outputs_map.count("no_such_node"));
}
void TestNodeNamePartsFromInput() {
string prefix;
string node_name;
string suffix;
NodeNamePartsFromInput("some_node_name", &prefix, &node_name, &suffix);
EXPECT_EQ("", prefix);
EXPECT_EQ("some_node_name", node_name);
EXPECT_EQ("", suffix);
NodeNamePartsFromInput("some_node_name/with/slashes", &prefix, &node_name,
&suffix);
EXPECT_EQ("", prefix);
EXPECT_EQ("some_node_name/with/slashes", node_name);
EXPECT_EQ("", suffix);
NodeNamePartsFromInput("some_node_name:0", &prefix, &node_name, &suffix);
EXPECT_EQ("", prefix);
EXPECT_EQ("some_node_name", node_name);
EXPECT_EQ(":0", suffix);
NodeNamePartsFromInput("^some_node_name", &prefix, &node_name, &suffix);
EXPECT_EQ("^", prefix);
EXPECT_EQ("some_node_name", node_name);
EXPECT_EQ("", suffix);
NodeNamePartsFromInput("^some_node_name:99", &prefix, &node_name, &suffix);
EXPECT_EQ("^", prefix);
EXPECT_EQ("some_node_name", node_name);
EXPECT_EQ(":99", suffix);
}
void TestNodeNameFromInput() {
EXPECT_EQ("node_name", NodeNameFromInput("node_name"));
EXPECT_EQ("node_name", NodeNameFromInput("node_name:0"));
EXPECT_EQ("node_name", NodeNameFromInput("^node_name"));
EXPECT_EQ("node_name", NodeNameFromInput("^node_name:42"));
}
void TestCanonicalInputName() {
EXPECT_EQ("node_name:0", CanonicalInputName("node_name"));
EXPECT_EQ("node_name:0", CanonicalInputName("node_name:0"));
EXPECT_EQ("^node_name:0", CanonicalInputName("^node_name"));
EXPECT_EQ("^node_name:42", CanonicalInputName("^node_name:42"));
}
void TestAddNodeInput() {
NodeDef node;
AddNodeInput("foo", &node);
EXPECT_EQ("foo", node.input(0));
}
void TestCopyNodeAttr() {
NodeDef node;
auto mutable_attr = node.mutable_attr();
(*mutable_attr)["foo"].set_i(3);
NodeDef copied_node;
CopyNodeAttr(node, "foo", "bar", &copied_node);
EXPECT_EQ(3, copied_node.attr().at("bar").i());
}
void TestSetNodeAttr() {
NodeDef node;
int32_t value_i = 32;
SetNodeAttr("foo", value_i, &node);
EXPECT_EQ(32, node.attr().at("foo").i());
string value_s = "some_value";
SetNodeAttr("bar", value_s, &node);
EXPECT_EQ("some_value", node.attr().at("bar").s());
}
void TestSetNodeTensorAttr() {
NodeDef node;
SetNodeTensorAttr<int32>("foo", {3, 1}, {1, 2, 3}, &node);
TensorProto tensor_proto = node.attr().at("foo").tensor();
Tensor tensor;
CHECK(tensor.FromProto(tensor_proto));
EXPECT_EQ(DT_INT32, tensor.dtype());
EXPECT_EQ(3, tensor.shape().dim_size(0));
EXPECT_EQ(1, tensor.shape().dim_size(1));
EXPECT_EQ(1, tensor.flat<int32>()(0));
EXPECT_EQ(2, tensor.flat<int32>()(1));
EXPECT_EQ(3, tensor.flat<int32>()(2));
}
void TestSetNodeTensorAttrWithTensor() {
NodeDef node;
Tensor input_tensor(DT_INT32, {4, 5});
test::FillIota<int32>(&input_tensor, 1);
SetNodeTensorAttr<int32>("foo", input_tensor, &node);
TensorProto tensor_proto = node.attr().at("foo").tensor();
Tensor tensor;
CHECK(tensor.FromProto(tensor_proto));
test::ExpectTensorEqual<int32>(input_tensor, tensor);
}
void TestGetNodeTensorAttr() {
NodeDef node;
Tensor input_tensor(DT_INT32, {4, 5});
test::FillIota<int32>(&input_tensor, 1);
TensorProto tensor_proto;
input_tensor.AsProtoTensorContent(&tensor_proto);
SetNodeAttr("foo", tensor_proto, &node);
Tensor result = GetNodeTensorAttr(node, "foo");
test::ExpectTensorEqual<int32>(input_tensor, result);
}
void TestFilterGraphDef() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 100;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Output add = Add(root.WithOpName("add"), a_const, b_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
Output remove_me = Add(root.WithOpName("remove_me"), mul, add);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef result_graph_def;
FilterGraphDef(
graph_def,
[](const NodeDef& node) { return (node.name() != "remove_me"); },
&result_graph_def);
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(result_graph_def, &node_map);
EXPECT_EQ(1, node_map.count("a"));
EXPECT_EQ(1, node_map.count("b"));
EXPECT_EQ(1, node_map.count("add"));
EXPECT_EQ(1, node_map.count("placeholder"));
EXPECT_EQ(1, node_map.count("output"));
EXPECT_EQ(0, node_map.count("remove_me"));
}
void TestRemoveAttributes() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef result_graph_def;
RemoveAttributes(graph_def, {"dtype"}, &result_graph_def);
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(result_graph_def, &node_map);
const NodeDef* removed_placeholder = node_map["placeholder"];
EXPECT_EQ(nullptr,
tensorflow::AttrSlice(*removed_placeholder).Find("dtype"));
}
void TestGetOpTypeMatches() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 100;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Output add = Add(root.WithOpName("add"), a_const, b_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphMatcher matcher(graph_def);
std::vector<NodeMatch> const_matches;
TF_ASSERT_OK(matcher.GetOpTypeMatches({"Const"}, &const_matches));
EXPECT_EQ(2, const_matches.size());
for (const NodeMatch& match : const_matches) {
EXPECT_EQ("Const", match.node.op());
EXPECT_TRUE(("a" == match.node.name()) || ("b" == match.node.name()))
<< "match.node.name()=" << match.node.name();
}
std::vector<NodeMatch> add_matches;
TF_ASSERT_OK(matcher.GetOpTypeMatches({"Add"}, &add_matches));
EXPECT_EQ(1, add_matches.size());
EXPECT_EQ("Add", add_matches[0].node.op());
EXPECT_EQ("add", add_matches[0].node.name());
std::vector<NodeMatch> add_child_matches;
TF_ASSERT_OK(matcher.GetOpTypeMatches({"Add", {{"Const"}, {"Const"}}},
&add_child_matches));
EXPECT_EQ(1, add_child_matches.size());
EXPECT_EQ("Add", add_child_matches[0].node.op());
EXPECT_EQ("add", add_child_matches[0].node.name());
EXPECT_EQ(2, add_child_matches[0].inputs.size());
for (const NodeMatch& match : add_child_matches[0].inputs) {
EXPECT_EQ("Const", match.node.op());
EXPECT_TRUE(("a" == match.node.name()) || ("b" == match.node.name()))
<< "match.node.name()=" << match.node.name();
}
std::vector<NodeMatch> no_such_matches;
TF_ASSERT_OK(matcher.GetOpTypeMatches({"NoSuch"}, &no_such_matches));
EXPECT_EQ(0, no_such_matches.size());
std::vector<NodeMatch> all_matches;
TF_ASSERT_OK(matcher.GetOpTypeMatches(
{"Mul", {{"Add", {{"Const"}, {"Const"}}}, {"Placeholder"}}},
&all_matches));
EXPECT_EQ(1, all_matches.size());
EXPECT_EQ("Mul", all_matches[0].node.op());
EXPECT_EQ("output", all_matches[0].node.name());
EXPECT_EQ(2, all_matches[0].inputs.size());
EXPECT_EQ("Add", all_matches[0].inputs[0].node.op());
EXPECT_EQ("add", all_matches[0].inputs[0].node.name());
EXPECT_EQ(2, all_matches[0].inputs[0].inputs.size());
EXPECT_EQ("Const", all_matches[0].inputs[0].inputs[0].node.op());
EXPECT_EQ("a", all_matches[0].inputs[0].inputs[0].node.name());
EXPECT_EQ(0, all_matches[0].inputs[0].inputs[0].inputs.size());
EXPECT_EQ("Const", all_matches[0].inputs[0].inputs[1].node.op());
EXPECT_EQ("b", all_matches[0].inputs[0].inputs[1].node.name());
EXPECT_EQ(0, all_matches[0].inputs[0].inputs[1].inputs.size());
EXPECT_EQ("Placeholder", all_matches[0].inputs[1].node.op());
EXPECT_EQ("placeholder", all_matches[0].inputs[1].node.name());
EXPECT_EQ(0, all_matches[0].inputs[1].inputs.size());
std::vector<NodeMatch> wildcard_matches;
TF_ASSERT_OK(
matcher.GetOpTypeMatches({"*", {{"*"}, {"*"}}}, &wildcard_matches));
EXPECT_EQ(1, wildcard_matches.size());
EXPECT_EQ("Add", wildcard_matches[0].node.op());
EXPECT_EQ("Const", wildcard_matches[0].inputs[0].node.op());
EXPECT_EQ("a", wildcard_matches[0].inputs[0].node.name());
EXPECT_EQ("Const", wildcard_matches[0].inputs[1].node.op());
EXPECT_EQ("b", wildcard_matches[0].inputs[1].node.name());
std::vector<NodeMatch> or_matches;
TF_ASSERT_OK(matcher.GetOpTypeMatches({"Add|Mul"}, &or_matches));
EXPECT_EQ(2, or_matches.size());
EXPECT_EQ("Add", or_matches[0].node.op());
EXPECT_EQ("add", or_matches[0].node.name());
EXPECT_EQ("Mul", or_matches[1].node.op());
EXPECT_EQ("output", or_matches[1].node.name());
}
void TestGetOpTypeMatchesDAG() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 100;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Output add = Add(root.WithOpName("add"), a_const, a_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphMatcher matcher(graph_def);
std::vector<NodeMatch> add_matches;
TF_ASSERT_OK(matcher.GetOpTypeMatches({"Add", {{"Const"}, {"Const"}}},
&add_matches));
EXPECT_EQ(1, add_matches.size());
EXPECT_EQ("Add", add_matches[0].node.op());
EXPECT_EQ("add", add_matches[0].node.name());
EXPECT_EQ("Const", add_matches[0].inputs[0].node.op());
EXPECT_EQ("a", add_matches[0].inputs[0].node.name());
EXPECT_EQ("Const", add_matches[0].inputs[1].node.op());
EXPECT_EQ("a", add_matches[0].inputs[1].node.name());
}
void TestReplaceMatchingOpTypes() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Output add = Add(root.WithOpName("add"), a_const, b_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef replaced_graph_def;
TF_ASSERT_OK(ReplaceMatchingOpTypes(
graph_def, {"*"},
[](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
NodeDef original_copy;
original_copy = match.node;
const string original_name = match.node.name();
original_copy.set_name(original_name + "_before_identity");
new_nodes->push_back(original_copy);
NodeDef identity_node;
identity_node.set_op("Identity");
identity_node.set_name(original_name);
*(identity_node.mutable_input()->Add()) = original_copy.name();
new_nodes->push_back(identity_node);
return OkStatus();
},
{}, &replaced_graph_def));
EXPECT_EQ(10, replaced_graph_def.node_size());
for (const NodeDef& node : replaced_graph_def.node()) {
if (node.name() == "output") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ("output_before_identity", node.input(0));
} else if (node.name() == "output_before_identity") {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ("add", node.input(0));
EXPECT_EQ("placeholder", node.input(1));
} else if (node.name() == "placeholder") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ("placeholder_before_identity", node.input(0));
} else if (node.name() == "placeholder_before_identity") {
EXPECT_EQ("Placeholder", node.op());
} else if (node.name() == "add") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ("add_before_identity", node.input(0));
} else if (node.name() == "add_before_identity") {
EXPECT_EQ("Add", node.op());
EXPECT_EQ("a", node.input(0));
EXPECT_EQ("b", node.input(1));
} else if (node.name() == "a") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ("a_before_identity", node.input(0));
} else if (node.name() == "a_before_identity") {
EXPECT_EQ("Const", node.op());
} else if (node.name() == "b") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ("b_before_identity", node.input(0));
} else if (node.name() == "b_before_identity") {
EXPECT_EQ("Const", node.op());
} else {
ADD_FAILURE() << "Unexpected node name found: " << node.name();
}
}
}
void TestMatchedNodesAsArray() {
NodeMatch fourth;
fourth.node.set_name("fourth");
NodeMatch second;
second.node.set_name("second");
second.inputs.push_back(fourth);
NodeMatch third;
third.node.set_name("third");
third.inputs.push_back(fourth);
NodeMatch first;
first.node.set_name("first");
first.inputs.push_back(second);
first.inputs.push_back(third);
std::vector<NodeDef> result;
MatchedNodesAsArray(first, &result);
EXPECT_EQ(4, result.size());
EXPECT_EQ("first", result[0].name());
EXPECT_EQ("second", result[1].name());
EXPECT_EQ("third", result[2].name());
EXPECT_EQ("fourth", result[3].name());
}
void TestRenameNodeInputs() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Output add = Add(root.WithOpName("add"), a_const, a_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef renamed_graph_def;
TF_ASSERT_OK(RenameNodeInputs(graph_def, {{"a", "b"}},
std::unordered_set<string>(),
&renamed_graph_def));
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(renamed_graph_def, &node_map);
EXPECT_EQ("b", node_map.at("add")->input(0));
EXPECT_EQ("b", node_map.at("add")->input(1));
}
void TestRenameNodeInputsWithRedirects() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Tensor c_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&c_data, 1.0f);
Output c_const = Const(root.WithOpName("c"), Input::Initializer(c_data));
Output add = Add(root.WithOpName("add"), a_const, b_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef renamed_graph_def;
TF_ASSERT_OK(RenameNodeInputs(
graph_def, {{"a", "f"}, {"f", "e"}, {"e", "d"}, {"d", "c"}},
std::unordered_set<string>(), &renamed_graph_def));
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(renamed_graph_def, &node_map);
EXPECT_EQ("c", node_map.at("add")->input(0));
EXPECT_EQ("b", node_map.at("add")->input(1));
}
void TestRenameNodeInputsWithCycle() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Tensor c_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&c_data, 1.0f);
Output c_const = Const(root.WithOpName("c"), Input::Initializer(c_data));
Output add = Add(root.WithOpName("add"), a_const, b_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("output"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef renamed_graph_def;
Status rename_status =
RenameNodeInputs(graph_def, {{"a", "d"}, {"d", "a"}},
std::unordered_set<string>(), &renamed_graph_def);
EXPECT_FALSE(rename_status.ok());
}
void TestRenameNodeInputsWithWildcard() {
auto root = tensorflow::Scope::DisabledShapeInferenceScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
QuantizeV2 quantize_a(root.WithOpName("quantize_a"), a_const, a_const,
a_const, DT_QUINT8,
QuantizeV2::Attrs().Mode("MIN_FIRST"));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
QuantizeV2 quantize_b(root.WithOpName("quantize_b"), b_const, b_const,
b_const, DT_QUINT8,
QuantizeV2::Attrs().Mode("MIN_FIRST"));
Output add = Add(root.WithOpName("add"), quantize_a.output_min,
quantize_a.output_max);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef renamed_graph_def;
TF_ASSERT_OK(RenameNodeInputs(graph_def, {{"quantize_a:*", "quantize_b"}},
std::unordered_set<string>(),
&renamed_graph_def));
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(renamed_graph_def, &node_map);
EXPECT_EQ("quantize_b:1", node_map.at("add")->input(0));
EXPECT_EQ("quantize_b:2", node_map.at("add")->input(1));
}
void TestRenameNodeInputsWithIgnores() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const = Const(root.WithOpName("b"), Input::Initializer(b_data));
Output add = Add(root.WithOpName("add"), a_const, a_const);
Output add2 = Add(root.WithOpName("add2"), a_const, a_const);
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output mul = Mul(root.WithOpName("mul"), add, placeholder);
Output mul2 = Mul(root.WithOpName("output"), mul, add2);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
GraphDef renamed_graph_def;
TF_ASSERT_OK(RenameNodeInputs(graph_def, {{"a", "b"}}, {"add2"},
&renamed_graph_def));
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(renamed_graph_def, &node_map);
EXPECT_EQ("b", node_map.at("add")->input(0));
EXPECT_EQ("b", node_map.at("add")->input(1));
EXPECT_EQ("a", node_map.at("add2")->input(0));
EXPECT_EQ("a", node_map.at("add2")->input(1));
}
void TestFindInvalidInputs() {
GraphDef graph_def;
NodeDef* mul_node = graph_def.mutable_node()->Add();
mul_node->set_op("Mul");
mul_node->set_name("mul_node");
*(mul_node->mutable_input()->Add()) = "add_node1";
*(mul_node->mutable_input()->Add()) = "add_node2:0";
*(mul_node->mutable_input()->Add()) = "^const_node1:0";
NodeDef* add_node1 = graph_def.mutable_node()->Add();
add_node1->set_op("Add");
add_node1->set_name("add_node1");
*(add_node1->mutable_input()->Add()) = "missing_input1";
*(add_node1->mutable_input()->Add()) = "const_node1:0";
*(add_node1->mutable_input()->Add()) = "missing_input2";
NodeDef* add_node2 = graph_def.mutable_node()->Add();
add_node2->set_op("Add");
add_node2->set_name("add_node2");
*(add_node2->mutable_input()->Add()) = "missing_input3";
*(add_node2->mutable_input()->Add()) = "const_node1:0";
*(add_node2->mutable_input()->Add()) = "^const_node2";
NodeDef* const_node1 = graph_def.mutable_node()->Add();
const_node1->set_op("Const");
const_node1->set_name("const_node1");
NodeDef* const_node2 = graph_def.mutable_node()->Add();
const_node2->set_op("Const");
const_node2->set_name("const_node2");
std::vector<std::pair<string, string>> invalid_inputs;
FindInvalidInputs(graph_def, &invalid_inputs);
EXPECT_EQ(3, invalid_inputs.size());
for (const std::pair<string, string>& invalid_input : invalid_inputs) {
EXPECT_TRUE((invalid_input.first == "add_node1") ||
(invalid_input.first == "add_node2"));
if (invalid_input.first == "add_node1") {
EXPECT_TRUE((invalid_input.second == "missing_input1") ||
(invalid_input.second == "missing_input2"))
<< invalid_input.second;
} else if (invalid_input.first == "add_node2") {
EXPECT_EQ("missing_input3", invalid_input.second);
}
}
}
void TestIsGraphValid() {
GraphDef invalid_graph_def;
NodeDef* mul_node = invalid_graph_def.mutable_node()->Add();
mul_node->set_op("Mul");
mul_node->set_name("mul_node");
*(mul_node->mutable_input()->Add()) = "add_node1";
*(mul_node->mutable_input()->Add()) = "add_node2:0";
*(mul_node->mutable_input()->Add()) = "^const_node1:0";
NodeDef* add_node1 = invalid_graph_def.mutable_node()->Add();
add_node1->set_op("Add");
add_node1->set_name("add_node1");
*(add_node1->mutable_input()->Add()) = "missing_input1";
*(add_node1->mutable_input()->Add()) = "const_node1:0";
*(add_node1->mutable_input()->Add()) = "missing_input2";
NodeDef* add_node2 = invalid_graph_def.mutable_node()->Add();
add_node2->set_op("Add");
add_node2->set_name("add_node2");
*(add_node2->mutable_input()->Add()) = "missing_input3";
*(add_node2->mutable_input()->Add()) = "const_node1:0";
*(add_node2->mutable_input()->Add()) = "^const_node2";
NodeDef* const_node1 = invalid_graph_def.mutable_node()->Add();
const_node1->set_op("Const");
const_node1->set_name("const_node1");
NodeDef* const_node2 = invalid_graph_def.mutable_node()->Add();
const_node2->set_op("Const");
const_node2->set_name("const_node2");
EXPECT_FALSE(IsGraphValid(invalid_graph_def).ok());
GraphDef valid_graph_def;
NodeDef* const_node3 = valid_graph_def.mutable_node()->Add();
const_node3->set_op("Const");
const_node3->set_name("const_node2");
EXPECT_TRUE(IsGraphValid(valid_graph_def).ok());
}
void TestGetInOutTypes() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 20;
Tensor float_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&float_data, 1.0f);
Output float_const =
Const(root.WithOpName("float_const"), Input::Initializer(float_data));
Tensor int_data(DT_INT32, TensorShape({width}));
test::FillIota<int32>(&int_data, 1);
Output int_const =
Const(root.WithOpName("int_const"), Input::Initializer(int_data));
Output float_relu = Relu(root.WithOpName("float_relu"), float_const);
Output int_relu = Relu(root.WithOpName("int_relu"), int_const);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(graph_def, &node_map);
const NodeDef* float_const_def = node_map.at("float_const");
DataTypeVector float_const_inputs;
DataTypeVector float_const_outputs;
TF_EXPECT_OK(GetInOutTypes(*float_const_def, &float_const_inputs,
&float_const_outputs));
ASSERT_EQ(0, float_const_inputs.size());
ASSERT_EQ(1, float_const_outputs.size());
EXPECT_EQ(DT_FLOAT, float_const_outputs[0]);
const NodeDef* int_const_def = node_map.at("int_const");
DataTypeVector int_const_inputs;
DataTypeVector int_const_outputs;
TF_EXPECT_OK(
GetInOutTypes(*int_const_def, &int_const_inputs, &int_const_outputs));
ASSERT_EQ(0, int_const_inputs.size());
ASSERT_EQ(1, int_const_outputs.size());
EXPECT_EQ(DT_INT32, int_const_outputs[0]);
const NodeDef* float_relu_def = node_map.at("float_relu");
DataTypeVector float_relu_inputs;
DataTypeVector float_relu_outputs;
TF_EXPECT_OK(GetInOutTypes(*float_relu_def, &float_relu_inputs,
&float_relu_outputs));
ASSERT_EQ(1, float_relu_inputs.size());
EXPECT_EQ(DT_FLOAT, float_relu_inputs[0]);
ASSERT_EQ(1, float_relu_outputs.size());
EXPECT_EQ(DT_FLOAT, float_relu_outputs[0]);
const NodeDef* int_relu_def = node_map.at("int_relu");
DataTypeVector int_relu_inputs;
DataTypeVector int_relu_outputs;
TF_EXPECT_OK(
GetInOutTypes(*int_relu_def, &int_relu_inputs, &int_relu_outputs));
ASSERT_EQ(1, int_relu_inputs.size());
EXPECT_EQ(DT_INT32, int_relu_inputs[0]);
ASSERT_EQ(1, int_relu_outputs.size());
EXPECT_EQ(DT_INT32, int_relu_outputs[0]);
}
void TestCopyOriginalMatch() {
NodeDef a;
a.set_op("Relu");
a.set_name("a");
AddNodeInput("b", &a);
NodeDef b;
b.set_op("Const");
b.set_name("b");
NodeMatch b_match;
b_match.node = b;
NodeMatch a_match;
a_match.node = a;
a_match.inputs.push_back(b_match);
std::vector<NodeDef> new_nodes;
CopyOriginalMatch(a_match, &new_nodes);
EXPECT_EQ(2, new_nodes.size());
EXPECT_EQ("a", new_nodes[0].name());
EXPECT_EQ("Relu", new_nodes[0].op());
EXPECT_EQ("b", new_nodes[1].name());
EXPECT_EQ("Const", new_nodes[1].op());
}
void TestHashNodeDef() {
using namespace ::tensorflow::ops;
const int width = 10;
auto a_root = tensorflow::Scope::NewRootScope();
Ten |
1,278 | cpp | tensorflow/tensorflow | file_utils | tensorflow/core/data/service/snapshot/file_utils.cc | tensorflow/core/data/service/snapshot/file_utils_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_FILE_UTILS_H_
#define TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_FILE_UTILS_H_
#include <cstdint>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace data {
absl::Status AtomicallyWriteStringToFile(absl::string_view filename,
absl::string_view str, tsl::Env* env);
absl::Status AtomicallyWriteBinaryProto(absl::string_view filename,
const tsl::protobuf::Message& proto,
tsl::Env* env);
absl::Status AtomicallyWriteTextProto(absl::string_view filename,
const tsl::protobuf::Message& proto,
tsl::Env* env);
absl::Status AtomicallyWriteTFRecords(absl::string_view filename,
const std::vector<Tensor>& tensors,
absl::string_view compression,
tsl::Env* env);
absl::StatusOr<std::vector<std::string>> GetChildren(
absl::string_view directory, tsl::Env* env);
bool IsTemporaryFile(absl::string_view filename);
int64_t SnapshotChunksCardinality(absl::string_view snapshot_path,
tsl::Env* env);
}
}
#endif
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/random.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/protobuf/status.pb.h"
namespace tensorflow {
namespace data {
namespace {
constexpr const char kTempFileSuffix[] = ".tmp";
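// All writers below share the same pattern: write to a uniquely named ".tmp"
// file first, then rename it into place, so readers never observe a partially
// written file (assuming the filesystem renames atomically).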
absl::Status AtomicallyWrite(
absl::string_view filename, tsl::Env* env,
absl::FunctionRef<absl::Status(const std::string&)> nonatomically_write) {
std::string uncommitted_filename = absl::StrCat(filename, "__");
if (!env->CreateUniqueFileName(&uncommitted_filename, kTempFileSuffix)) {
return tsl::errors::Internal("Failed to write file ", filename,
": Unable to create temporary files.");
}
TF_RETURN_IF_ERROR(nonatomically_write(uncommitted_filename));
absl::Status status =
env->RenameFile(uncommitted_filename, std::string(filename));
if (!status.ok()) {
return tsl::errors::Internal("Failed to rename file: ", status.ToString(),
". Source: ", uncommitted_filename,
", destination: ", filename);
}
return status;
}
}
absl::Status AtomicallyWriteStringToFile(absl::string_view filename,
absl::string_view str, tsl::Env* env) {
auto nonatomically_write = [&](const std::string& uncommitted_filename) {
TF_RETURN_IF_ERROR(WriteStringToFile(env, uncommitted_filename, str));
return absl::OkStatus();
};
TF_RETURN_WITH_CONTEXT_IF_ERROR(
AtomicallyWrite(filename, env, nonatomically_write),
"Requested to write string: ", str);
return absl::OkStatus();
}
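// Usage sketch (illustrative; the path is hypothetical):
//
//   TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(
//       "/tmp/snapshot/DONE", "", tsl::Env::Default()));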
absl::Status AtomicallyWriteBinaryProto(absl::string_view filename,
const tsl::protobuf::Message& proto,
tsl::Env* env) {
auto nonatomically_write = [&](const std::string& uncommitted_filename) {
TF_RETURN_IF_ERROR(WriteBinaryProto(env, uncommitted_filename, proto));
return absl::OkStatus();
};
TF_RETURN_WITH_CONTEXT_IF_ERROR(
AtomicallyWrite(filename, env, nonatomically_write),
"Requested to write proto in binary format: ", proto.DebugString());
return absl::OkStatus();
}
absl::Status AtomicallyWriteTextProto(absl::string_view filename,
const tsl::protobuf::Message& proto,
tsl::Env* env) {
auto nonatomically_write = [&](const std::string& uncommitted_filename) {
TF_RETURN_IF_ERROR(WriteTextProto(env, uncommitted_filename, proto));
return absl::OkStatus();
};
TF_RETURN_WITH_CONTEXT_IF_ERROR(
AtomicallyWrite(filename, env, nonatomically_write),
"Requested to write proto in text format: ", proto.DebugString());
return absl::OkStatus();
}
absl::Status AtomicallyWriteTFRecords(absl::string_view filename,
const std::vector<Tensor>& tensors,
absl::string_view compression,
tsl::Env* env) {
auto nonatomically_write = [&](const std::string& uncommitted_filename) {
snapshot_util::TFRecordWriter writer(uncommitted_filename,
std::string(compression));
TF_RETURN_IF_ERROR(writer.Initialize(env));
TF_RETURN_IF_ERROR(writer.WriteTensors(tensors));
return writer.Close();
};
TF_RETURN_WITH_CONTEXT_IF_ERROR(
AtomicallyWrite(filename, env, nonatomically_write),
" Requested to atomically write TF record file: ", filename);
return absl::OkStatus();
}
absl::StatusOr<std::vector<std::string>> GetChildren(
absl::string_view directory, tsl::Env* env) {
std::vector<std::string> files, result;
TF_RETURN_IF_ERROR(env->FileExists(std::string(directory)));
absl::Status status = env->GetChildren(std::string(directory), &files);
if (absl::IsNotFound(status)) {
return result;
}
for (std::string& file : files) {
if (!IsTemporaryFile(file)) {
result.push_back(std::move(file));
}
}
return result;
}
bool IsTemporaryFile(absl::string_view filename) {
return absl::EndsWith(filename, kTempFileSuffix);
}
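// Returns the number of committed chunk files for a finished snapshot, or
// kUnknownCardinality while the snapshot is still in progress (no DONE file
// yet) or when the chunks directory cannot be listed.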
int64_t SnapshotChunksCardinality(absl::string_view snapshot_path,
tsl::Env* env) {
if (!env->FileExists(SnapshotDoneFilePath(snapshot_path)).ok()) {
return kUnknownCardinality;
}
absl::StatusOr<std::vector<std::string>> chunks =
GetChildren(CommittedChunksDirectory(snapshot_path), env);
if (!chunks.ok()) {
return kUnknownCardinality;
}
return chunks->size();
}
}
} | #include "tensorflow/core/data/service/snapshot/file_utils.h"
#include <cstdint>
#include <string>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/lib/io/compression.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using tsl::testing::IsOkAndHolds;
using tsl::testing::StatusIs;
absl::StatusOr<std::string> CreateTestDirectory() {
std::string directory;
if (!tsl::Env::Default()->LocalTempFilename(&directory)) {
return tsl::errors::FailedPrecondition(
"Failed to create local test directory.");
}
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(directory));
return directory;
}
using AtomicallyWriteStringToFileTest = ::testing::TestWithParam<std::string>;
TEST_P(AtomicallyWriteStringToFileTest, WriteString) {
TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
std::string test_file = tsl::io::JoinPath(directory, "test_file");
std::string file_contents = GetParam();
TF_ASSERT_OK(AtomicallyWriteStringToFile(test_file, file_contents,
tsl::Env::Default()));
std::string data;
TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
TF_ASSERT_OK(tsl::ReadFileToString(tsl::Env::Default(), test_file, &data));
EXPECT_EQ(data, file_contents);
}
INSTANTIATE_TEST_SUITE_P(FileContents, AtomicallyWriteStringToFileTest,
::testing::ValuesIn<std::string>({"OK", ""}));
TEST(FileUtilsTest, AtomicallyWriteBinaryProto) {
TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
std::string test_file = tsl::io::JoinPath(directory, "test_file");
DatasetDef out = testing::RangeDataset(10);
TF_ASSERT_OK(AtomicallyWriteBinaryProto(test_file, out, tsl::Env::Default()));
DatasetDef in;
TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), test_file, &in));
EXPECT_THAT(in, testing::EqualsProto(out));
}
TEST(FileUtilsTest, AtomicallyWriteTextProto) {
TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
std::string test_file = tsl::io::JoinPath(directory, "test_file");
DatasetDef out = testing::RangeDataset(10);
TF_ASSERT_OK(AtomicallyWriteTextProto(test_file, out, tsl::Env::Default()));
DatasetDef in;
TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
TF_ASSERT_OK(tsl::ReadTextProto(tsl::Env::Default(), test_file, &in));
EXPECT_THAT(in, testing::EqualsProto(out));
}
TEST(FileUtilsTest, AtomicallyWriteTFRecord) {
TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
std::string test_file = tsl::io::JoinPath(directory, "test_file");
Tensor out = CreateTensor<int64_t>(TensorShape({2}), {1, 2});
TF_ASSERT_OK(AtomicallyWriteTFRecords(
test_file, {out}, tsl::io::compression::kSnappy, tsl::Env::Default()));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
snapshot_util::TFRecordReaderImpl reader(test_file,
tsl::io::compression::kSnappy);
TF_ASSERT_OK(reader.Initialize(tsl::Env::Default()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<Tensor> in, reader.GetTensors());
EXPECT_EQ(out.DebugString(), in.front().DebugString());
}
TEST(FileUtilsTest, GetChildren) {
TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
std::string test_file = tsl::io::JoinPath(directory, "test_file");
TF_ASSERT_OK(AtomicallyWriteStringToFile(test_file, "", tsl::Env::Default()));
std::string tmp_file = tsl::io::JoinPath(directory, "test_file.tmp");
TF_ASSERT_OK(AtomicallyWriteStringToFile(tmp_file, "", tsl::Env::Default()));
EXPECT_THAT(GetChildren(directory, tsl::Env::Default()),
IsOkAndHolds(ElementsAre("test_file")));
}
TEST(FileUtilsTest, GetChildrenEmptyDirectory) {
TF_ASSERT_OK_AND_ASSIGN(std::string empty_directory, CreateTestDirectory());
EXPECT_THAT(GetChildren(empty_directory, tsl::Env::Default()),
IsOkAndHolds(IsEmpty()));
}
TEST(FileUtilsTest, GetChildrenDirectoryNotFound) {
EXPECT_THAT(GetChildren("Not exist", tsl::Env::Default()),
StatusIs(tsl::error::NOT_FOUND));
}
TEST(FileUtilsTest, IsTemporaryFile) {
EXPECT_TRUE(IsTemporaryFile("file.tmp"));
EXPECT_FALSE(IsTemporaryFile("file"));
EXPECT_FALSE(IsTemporaryFile(""));
}
}
}
} |
1,279 | cpp | tensorflow/tensorflow | transform_graph | tensorflow/tools/graph_transforms/transform_graph.cc | tensorflow/tools/graph_transforms/transform_graph_test.cc | #ifndef TENSORFLOW_TOOLS_GRAPH_TRANSFORMS_TRANSFORM_GRAPH_H_
#define TENSORFLOW_TOOLS_GRAPH_TRANSFORMS_TRANSFORM_GRAPH_H_
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
int ParseFlagsAndTransformGraph(int argc, char* argv[], bool init_main);
typedef std::vector<std::pair<string, TransformFuncParameters>>
TransformParameters;
Status ParseTransformParameters(const string& transforms_string,
TransformParameters* params_list);
Status TransformGraph(const std::vector<string>& inputs,
const std::vector<string>& outputs,
const TransformParameters& transform_params,
GraphDef* graph_def);
}
}
#endif
#include "tensorflow/tools/graph_transforms/transform_graph.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/tools/graph_transforms/file_utils.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
#if !defined(PLATFORM_WINDOWS)
#include <pwd.h>
#include <unistd.h>
#endif
namespace tensorflow {
namespace graph_transforms {
using tensorflow::strings::Scanner;
Status ParseTransformParameters(const string& transforms_string,
TransformParameters* params_list) {
params_list->clear();
enum {
TRANSFORM_NAME,
TRANSFORM_PARAM_NAME,
TRANSFORM_PARAM_VALUE,
} state = TRANSFORM_NAME;
StringPiece remaining(transforms_string);
StringPiece match;
StringPiece transform_name;
StringPiece parameter_name;
StringPiece parameter_value;
TransformFuncParameters func_parameters;
while (!remaining.empty()) {
if (state == TRANSFORM_NAME) {
func_parameters.clear();
Scanner(remaining).AnySpace().GetResult(&remaining, &match);
if (remaining.empty()) {
return OkStatus();
}
const bool found_transform_name =
Scanner(remaining)
.Many(Scanner::LETTER_DIGIT_UNDERSCORE)
.GetResult(&remaining, &transform_name);
if (!found_transform_name) {
return errors::InvalidArgument("Looking for transform name, but found ",
string(remaining).c_str());
}
if (Scanner(remaining).OneLiteral("(").GetResult(&remaining, &match)) {
state = TRANSFORM_PARAM_NAME;
} else {
params_list->push_back({string(transform_name), func_parameters});
transform_name = "";
state = TRANSFORM_NAME;
}
} else if (state == TRANSFORM_PARAM_NAME) {
if (Scanner(remaining).OneLiteral(")").GetResult(&remaining, &match)) {
params_list->push_back({string(transform_name), func_parameters});
transform_name = "";
state = TRANSFORM_NAME;
} else {
Scanner(remaining).ZeroOrOneLiteral(",").GetResult(&remaining, &match);
Scanner(remaining).AnySpace().GetResult(&remaining, &match);
const bool found_parameter_name =
Scanner(remaining)
.Many(Scanner::LETTER_DIGIT_UNDERSCORE)
.GetResult(&remaining, ¶meter_name);
if (!found_parameter_name) {
return errors::InvalidArgument(
"Looking for parameter name, but found ",
string(remaining).c_str());
}
if (Scanner(remaining).OneLiteral("=").GetResult(&remaining, &match)) {
state = TRANSFORM_PARAM_VALUE;
} else {
return errors::InvalidArgument("Looking for =, but found ",
string(remaining).c_str());
}
}
} else if (state == TRANSFORM_PARAM_VALUE) {
bool found_parameter_value;
if (Scanner(remaining).OneLiteral("\"").GetResult(&remaining, &match)) {
found_parameter_value =
Scanner(remaining).ScanEscapedUntil('"').GetResult(
&remaining, ¶meter_value);
if (found_parameter_value) {
Scanner(remaining).OneLiteral("\"").GetResult(&remaining, &match);
}
} else {
found_parameter_value =
Scanner(remaining)
.Many(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE)
.GetResult(&remaining, ¶meter_value);
}
if (!found_parameter_value) {
return errors::InvalidArgument("Looking for parameter name, but found ",
string(remaining).c_str());
}
func_parameters[string(parameter_name)].emplace_back(parameter_value);
Scanner(remaining).ZeroOrOneLiteral("\"").GetResult(&remaining, &match);
Scanner(remaining).ZeroOrOneLiteral("'").GetResult(&remaining, &match);
state = TRANSFORM_PARAM_NAME;
}
}
return OkStatus();
}
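// Editor's note: a worked example of the grammar accepted above. Parsing
//   remove_nodes(op=Identity) fold_constants(ignore_errors=true)
// yields two entries:
//   params_list[0] == {"remove_nodes", {{"op", {"Identity"}}}}
//   params_list[1] == {"fold_constants", {{"ignore_errors", {"true"}}}}
// Repeating a parameter name appends to its value list (a=1, a=3 gives
// {"a", {"1", "3"}}), and quoted values may contain commas and spaces.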
std::string ExpandPath(const std::string& path_string) {
#if defined(PLATFORM_WINDOWS)
return path_string;
#else
if (path_string.empty() || path_string[0] != '~') {
return path_string;
}
const char* home = nullptr;
std::string::size_type prefix = path_string.find_first_of('/');
if (path_string.length() == 1 || prefix == 1) {
home = getenv("HOME");
if (!home) {
struct passwd* pw = getpwuid(getuid());
if (pw) {
home = pw->pw_dir;
}
}
} else {
std::string user(path_string, 1, (prefix == std::string::npos)
? std::string::npos
: prefix - 1);
struct passwd* pw = getpwnam(user.c_str());
if (pw) {
home = pw->pw_dir;
}
}
if (!home) {
return path_string;
}
string path(home);
if (prefix == std::string::npos) {
return path;
}
if (path.length() == 0 || path[path.length() - 1] != '/') {
path += '/';
}
path += path_string.substr(prefix + 1);
return path;
#endif
}
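// Editor's note: expansion rules implemented above (non-Windows only).
//   ExpandPath("~")           -> $HOME, or the passwd entry if HOME is unset
//   ExpandPath("~/g.pb")      -> $HOME + "/g.pb"
//   ExpandPath("~alice/g.pb") -> alice's home directory + "/g.pb"
//   ExpandPath("/abs/g.pb")   -> returned unchanged
// If the user lookup fails, the input string is returned verbatim.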
int ParseFlagsAndTransformGraph(int argc, char* argv[], bool init_main) {
string in_graph_string = "";
string out_graph_string = "";
string inputs_string = "";
string outputs_string = "";
string transforms_string = "";
bool output_as_text = false;
std::vector<Flag> flag_list = {
Flag("in_graph", &in_graph_string, "input graph file name"),
Flag("out_graph", &out_graph_string, "output graph file name"),
Flag("inputs", &inputs_string, "inputs"),
Flag("outputs", &outputs_string, "outputs"),
Flag("transforms", &transforms_string, "list of transforms"),
Flag("output_as_text", &output_as_text,
"whether to write the graph in text protobuf format"),
};
string usage = Flags::Usage(argv[0], flag_list);
usage += "\nTransforms are:\n";
TransformRegistry* transform_registry = GetTransformRegistry();
for (const auto& pair : *transform_registry) {
usage += pair.first + "\n";
}
const bool parse_result = Flags::Parse(&argc, argv, flag_list);
if (init_main) {
port::InitMain(argv[0], &argc, &argv);
}
if (!parse_result) {
LOG(ERROR) << usage;
return -1;
}
if (argc > 1) {
LOG(ERROR) << "Unknown argument " << argv[1] << ".\n" << usage;
return -1;
}
if (in_graph_string.empty()) {
LOG(ERROR) << "in_graph graph can't be empty.\n" << usage;
return -1;
}
if (out_graph_string.empty()) {
LOG(ERROR) << "out_graph graph can't be empty.\n" << usage;
return -1;
}
if (transforms_string.empty()) {
LOG(ERROR) << "You must specify at least one transform.\n" << usage;
return -1;
}
string in_graph = ExpandPath(in_graph_string);
string out_graph = ExpandPath(out_graph_string);
std::vector<string> inputs = str_util::Split(inputs_string, ',');
std::vector<string> outputs = str_util::Split(outputs_string, ',');
TransformParameters transform_params;
Status parse_status =
ParseTransformParameters(transforms_string, &transform_params);
if (!parse_status.ok()) {
LOG(ERROR) << "Failed to parse --transform argument, error was "
<< parse_status.message();
return -1;
}
if (transform_params.empty()) {
LOG(ERROR) << "You must specify at least one transform.\n" << usage;
return -1;
}
GraphDef graph_def;
Status load_status = LoadTextOrBinaryGraphFile(in_graph, &graph_def);
if (!load_status.ok()) {
LOG(ERROR) << "Loading graph '" << in_graph_string << "' failed with "
<< load_status.message();
LOG(ERROR) << usage;
return -1;
}
Status transform_result =
TransformGraph(inputs, outputs, transform_params, &graph_def);
if (!transform_result.ok()) {
LOG(ERROR) << transform_result.message();
LOG(ERROR) << usage;
return -1;
}
Status save_status;
if (output_as_text) {
save_status = WriteTextProto(Env::Default(), out_graph, graph_def);
} else {
save_status = WriteBinaryProto(Env::Default(), out_graph, graph_def);
}
if (!save_status.ok()) {
LOG(ERROR) << "Saving graph '" << out_graph_string << "' failed with "
<< save_status.message();
return -1;
}
return 0;
}
Status ShouldIgnoreErrors(const TransformFuncParameters& transform_params,
bool* ignore_errors) {
*ignore_errors = false;
if (transform_params.count("ignore_errors") &&
(!transform_params.at("ignore_errors").empty())) {
const string& ignore_errors_string =
absl::AsciiStrToLower(transform_params.at("ignore_errors").at(0));
if (ignore_errors_string == "true") {
*ignore_errors = true;
} else if (ignore_errors_string == "false") {
*ignore_errors = false;
} else {
return errors::InvalidArgument(
"ignore_errors should be true or false, found ",
ignore_errors_string);
}
}
return OkStatus();
}
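// Editor's note: ignore_errors is evaluated per transform, so in
//   quantize_weights(ignore_errors=true) strip_unused_nodes
// a quantize_weights failure is logged and the unmodified graph is passed on
// (see the ignore_errors branch in TransformGraph below), while a
// strip_unused_nodes failure still aborts the whole pipeline.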
Status TransformGraph(const std::vector<string>& inputs,
const std::vector<string>& outputs,
const TransformParameters& transform_params,
GraphDef* graph_def) {
TransformRegistry* transform_registry = GetTransformRegistry();
for (const auto& transform_info : transform_params) {
const string& transform_name = transform_info.first;
if (transform_name.empty()) {
continue;
}
if (!transform_registry->count(transform_name)) {
return errors::InvalidArgument("Transform '", transform_name,
"' not recognized.");
}
LOG(INFO) << "Applying " << transform_name;
const TransformFunc& transform_func =
transform_registry->at(transform_name);
TransformFuncContext context;
context.input_names = inputs;
context.output_names = outputs;
context.params = transform_info.second;
bool ignore_errors;
TF_RETURN_IF_ERROR(
ShouldIgnoreErrors(transform_info.second, &ignore_errors));
GraphDef transformed_graph_def;
Status transform_result =
transform_func(*graph_def, context, &transformed_graph_def);
if (!transform_result.ok()) {
if (ignore_errors) {
LOG(ERROR) << transform_name << ": Ignoring error "
<< transform_result.message();
transformed_graph_def = *graph_def;
} else {
return transform_result;
}
}
*transformed_graph_def.mutable_library() = graph_def->library();
TF_RETURN_IF_ERROR(IsGraphValid(transformed_graph_def));
*graph_def = transformed_graph_def;
}
return OkStatus();
}
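// Editor's sketch (assumed usage, not from the original file): driving the
// transforms programmatically instead of through command-line flags.
//
//   GraphDef graph_def = ...;  // loaded elsewhere
//   TransformParameters params;
//   TF_RETURN_IF_ERROR(ParseTransformParameters(
//       "fold_constants strip_unused_nodes", &params));
//   TF_RETURN_IF_ERROR(
//       TransformGraph({"input"}, {"output"}, params, &graph_def));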
}
} | #include "tensorflow/tools/graph_transforms/transform_graph.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status ShouldIgnoreErrors(const TransformFuncParameters& transform_params,
bool* ignore_errors);
namespace {
Status test_empty_graph_transform(const GraphDef& graph_def,
const TransformFuncContext& context,
GraphDef* result) {
result->Clear();
return OkStatus();
}
}
REGISTER_GRAPH_TRANSFORM("test_empty_graph_transform",
test_empty_graph_transform);
class TransformGraphTest : public ::testing::Test {
protected:
void TestConstantFolding() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 100;
Tensor a_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const =
Const(root.WithOpName("a_expect_removed"), Input::Initializer(a_data));
Tensor b_data(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_data, 1.0f);
Output b_const =
Const(root.WithOpName("b_expect_removed"), Input::Initializer(b_data));
Output add = Add(root.WithOpName("add_expect_removed"), a_const, b_const);
Output placeholder =
Placeholder(root.WithOpName("placeholder_expect_remains"), DT_FLOAT);
Output mul =
Mul(root.WithOpName("output_expect_remains"), add, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
string graph_def_serialized;
graph_def.SerializeToString(&graph_def_serialized);
const string dir = testing::TmpDir();
const string in_filename_pb = io::JoinPath(dir, "in_graphdef.pb");
const string out_filename_pb = io::JoinPath(dir, "out_graphdef.pb");
TF_ASSERT_OK(WriteStringToFile(Env::Default(), in_filename_pb,
graph_def_serialized));
std::vector<string> args = {"some_binary",
"--in_graph=" + in_filename_pb,
"--out_graph=" + out_filename_pb,
"--inputs=placeholder_expect_remains",
"--outputs=output_expect_remains",
"--transforms=fold_constants"};
const int argc = 6;
EXPECT_EQ(argc, args.size());
char* argv[argc];
std::vector<char*> char_strings;
for (int i = 0; i < argc; ++i) {
string arg = args[i];
char* char_string = new char[arg.size() + 1];
std::copy_n(arg.c_str(), arg.size() + 1, char_string);
argv[i] = char_string;
char_strings.push_back(char_string);
}
ParseFlagsAndTransformGraph(argc, argv, false);
for (char* char_string : char_strings) {
delete[] char_string;
}
GraphDef out_graph_def;
TF_EXPECT_OK(
ReadBinaryProto(Env::Default(), out_filename_pb, &out_graph_def));
std::map<string, const NodeDef*> out_node_map;
graph_transforms::MapNamesToNodes(out_graph_def, &out_node_map);
for (const NodeDef& node : out_graph_def.node()) {
const int occurrence_count = out_node_map.count(node.name());
if (str_util::EndsWith(node.name(), "expect_removed")) {
EXPECT_EQ(0, occurrence_count) << "node.name()=" << node.name();
}
if (str_util::EndsWith(node.name(), "expect_remains")) {
EXPECT_EQ(1, occurrence_count) << "node.name()=" << node.name();
}
}
}
void TestTransformRegistration() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Output placeholder =
Placeholder(root.WithOpName("placeholder_expect_remains"), DT_FLOAT);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
EXPECT_EQ(1, graph_def.node().size());
TF_ASSERT_OK(TransformGraph({}, {}, {{"test_empty_graph_transform", {}}},
&graph_def));
EXPECT_EQ(0, graph_def.node().size());
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
Status no_such_status =
TransformGraph({}, {}, {{"test_no_such_transform", {}}}, &graph_def);
EXPECT_TRUE(absl::StrContains(no_such_status.ToString(), "not recognized"));
}
void TestParseTransformParameters() {
TransformParameters params_list;
TF_EXPECT_OK(ParseTransformParameters("foo", ¶ms_list));
EXPECT_EQ(1, params_list.size());
EXPECT_EQ("foo", params_list[0].first);
EXPECT_TRUE(params_list[0].second.empty());
TF_EXPECT_OK(ParseTransformParameters("foo bar", ¶ms_list));
EXPECT_EQ(2, params_list.size());
EXPECT_EQ("foo", params_list[0].first);
EXPECT_TRUE(params_list[0].second.empty());
EXPECT_EQ("bar", params_list[1].first);
EXPECT_TRUE(params_list[1].second.empty());
TF_EXPECT_OK(ParseTransformParameters("foo() bar()", ¶ms_list));
EXPECT_EQ(2, params_list.size());
EXPECT_EQ("foo", params_list[0].first);
EXPECT_TRUE(params_list[0].second.empty());
EXPECT_EQ("bar", params_list[1].first);
EXPECT_TRUE(params_list[1].second.empty());
TF_EXPECT_OK(
ParseTransformParameters("foo(bob_something=sue)", ¶ms_list));
EXPECT_EQ(1, params_list.size());
EXPECT_EQ("foo", params_list[0].first);
EXPECT_EQ(1, params_list[0].second.count("bob_something"));
EXPECT_EQ(1, params_list[0].second["bob_something"].size());
EXPECT_EQ("sue", params_list[0].second["bob_something"][0]);
TF_EXPECT_OK(ParseTransformParameters("bar(a=1, b=2, a=3)", ¶ms_list));
EXPECT_EQ(1, params_list.size());
EXPECT_EQ("bar", params_list[0].first);
EXPECT_EQ(1, params_list[0].second.count("a"));
EXPECT_EQ(2, params_list[0].second["a"].size());
EXPECT_EQ("1", params_list[0].second["a"][0]);
EXPECT_EQ("3", params_list[0].second["a"][1]);
EXPECT_EQ(1, params_list[0].second.count("b"));
EXPECT_EQ(1, params_list[0].second["b"].size());
EXPECT_EQ("2", params_list[0].second["b"][0]);
TF_EXPECT_OK(ParseTransformParameters("bar(a=\"1\", b=\"1,2,3\", a=3)",
¶ms_list));
EXPECT_EQ(1, params_list.size());
EXPECT_EQ("bar", params_list[0].first);
EXPECT_EQ(1, params_list[0].second.count("a"));
EXPECT_EQ(2, params_list[0].second["a"].size());
EXPECT_EQ("1", params_list[0].second["a"][0]);
EXPECT_EQ("3", params_list[0].second["a"][1]);
EXPECT_EQ(1, params_list[0].second.count("b"));
EXPECT_EQ(1, params_list[0].second["b"].size());
EXPECT_EQ("1,2,3", params_list[0].second["b"][0]);
}
void TestParseEscapedNewline() {
TransformParameters params_list;
ParseTransformParameters("\\\n", ¶ms_list).IgnoreError();
EXPECT_EQ(0, params_list.size());
}
void TestParseExtraSpaces() {
TransformParameters params_list;
ParseTransformParameters(" ", ¶ms_list).IgnoreError();
EXPECT_EQ(0, params_list.size());
TF_EXPECT_OK(ParseTransformParameters(" foo bar \\\n", ¶ms_list));
EXPECT_EQ(2, params_list.size());
EXPECT_EQ("foo", params_list[0].first);
EXPECT_TRUE(params_list[0].second.empty());
EXPECT_EQ("bar", params_list[1].first);
EXPECT_TRUE(params_list[1].second.empty());
}
void TestShouldIgnoreErrors() {
bool ignore_errors;
TF_EXPECT_OK(
ShouldIgnoreErrors({{"ignore_errors", {"true"}}}, &ignore_errors));
EXPECT_TRUE(ignore_errors);
TF_EXPECT_OK(
ShouldIgnoreErrors({{"ignore_errors", {"false"}}}, &ignore_errors));
EXPECT_FALSE(ignore_errors);
TF_EXPECT_OK(ShouldIgnoreErrors({}, &ignore_errors));
EXPECT_FALSE(ignore_errors);
EXPECT_FALSE(
ShouldIgnoreErrors({{"ignore_errors", {"foo"}}}, &ignore_errors).ok());
}
};
TEST_F(TransformGraphTest, TestConstantFolding) { TestConstantFolding(); }
TEST_F(TransformGraphTest, TestTransformRegistration) {
TestTransformRegistration();
}
TEST_F(TransformGraphTest, TestParseTransformParameters) {
TestParseTransformParameters();
}
TEST_F(TransformGraphTest, TestParseEscapedNewline) {
TestParseEscapedNewline();
}
TEST_F(TransformGraphTest, TestShouldIgnoreErrors) { TestShouldIgnoreErrors(); }
}
} |
1,280 | cpp | tensorflow/tensorflow | merge | tensorflow/tools/proto_splitter/merge.cc | tensorflow/tools/proto_splitter/merge_test.cc | #ifndef TENSORFLOW_TOOLS_PROTO_SPLITTER_MERGE_H_
#define TENSORFLOW_TOOLS_PROTO_SPLITTER_MERGE_H_
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/records/record_reader.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow::tools::proto_splitter {
class Merger {
private:
enum MergerOp { MERGE, READ };
public:
static absl::Status Merge(
const std::vector<std::unique_ptr<tsl::protobuf::Message>>& chunks,
const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
tsl::protobuf::Message* merged_message);
static absl::Status Read(std::string prefix,
tsl::protobuf::Message* merged_message);
static absl::Status ReadPartial(
absl::string_view prefix,
const ::tensorflow::proto_splitter::ChunkMetadata& chunk_metadata,
tsl::protobuf::Message* merged_message);
private:
static absl::Status ReadPb(const std::string& pb_file,
tsl::protobuf::Message* merged_message);
static absl::Status ReadFields(
const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<::tensorflow::proto_splitter::ChunkInfo>&
chunks_info,
tsl::protobuf::Message* merged_message);
static absl::Status ProcessField(
const ::tensorflow::proto_splitter::ChunkedField& chunked_field,
tsl::protobuf::Message* merged_message,
const std::vector<::tensorflow::proto_splitter::ChunkInfo>& chunks_info,
const std::vector<std::unique_ptr<tsl::protobuf::Message>>& chunks,
riegeli::RecordReader<riegeli::FdReader<>>& reader, MergerOp op);
};
}
#endif
#include "tensorflow/tools/proto_splitter/merge.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "riegeli/base/object.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/records/record_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::tools::proto_splitter {
using ::tensorflow::proto_splitter::ChunkedField;
using ::tensorflow::proto_splitter::ChunkedMessage;
using ::tensorflow::proto_splitter::ChunkInfo;
using ::tensorflow::proto_splitter::ChunkMetadata;
using ::tensorflow::proto_splitter::FieldIndex;
using tools::proto_splitter::GetChunkMetadata;
using tools::proto_splitter::GetRiegeliReader;
using tools::proto_splitter::OnlyContainsPb;
using tsl::protobuf::FieldDescriptor;
using tsl::protobuf::Message;
using tsl::protobuf::Reflection;
absl::Status Merger::Merge(const std::vector<std::unique_ptr<Message>>& chunks,
const ChunkedMessage& chunked_message,
Message* merged_message) {
riegeli::RecordReader<riegeli::FdReader<>> null_reader{riegeli::kClosed};
if (chunked_message.has_chunk_index()) {
merged_message->MergeFrom(*chunks[chunked_message.chunk_index()].get());
}
for (const auto& chunked_field : chunked_message.chunked_fields()) {
absl::Status s = ProcessField(chunked_field, merged_message, {}, chunks,
null_reader, MergerOp::MERGE);
if (!s.ok()) return s;
}
return absl::OkStatus();
}
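// Editor's sketch: merging already-parsed chunks in memory. The
// ChunkedMessage metadata produced by the splitter maps each chunked field
// back to an index into `chunks`; the optional top-level chunk_index is the
// base message that the chunked fields are layered onto.
//
//   std::vector<std::unique_ptr<tsl::protobuf::Message>> chunks = ...;
//   ChunkedMessage metadata = ...;  // produced at split time
//   GraphDef merged;
//   TF_RETURN_IF_ERROR(Merger::Merge(chunks, metadata, &merged));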
absl::Status Merger::Read(std::string prefix, Message* merged_message) {
uint64_t start_time = Env::Default()->NowMicros();
TF_ASSIGN_OR_RETURN(bool only_contains_pb, OnlyContainsPb(prefix));
if (only_contains_pb) {
return ReadPb(absl::StrCat(prefix, ".pb"), merged_message);
}
TF_ASSIGN_OR_RETURN(auto reader,
GetRiegeliReader(absl::StrCat(prefix, ".cpb")));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
return absl::FailedPreconditionError(
absl::StrCat("Couldn't read ChunkMetadata from chunked proto.\n",
read_metadata.status().ToString()));
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
absl::Status s =
ReadFields(chunk_metadata.message(), reader, chunks_info, merged_message);
reader.Close();
uint64_t end_time = Env::Default()->NowMicros();
LOG(INFO) << "Finished reading and merging chunked proto, took "
<< HumanReadableDuration(end_time - start_time) << ".";
return s;
}
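// Editor's sketch: Read() takes a path *prefix* and picks the format itself.
//
//   SavedModel sm;
//   // Reads "model/saved_model.pb" if that is all that exists; otherwise
//   // reads the chunked riegeli file "model/saved_model.cpb".
//   TF_RETURN_IF_ERROR(Merger::Read("model/saved_model", &sm));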
absl::Status Merger::ReadPartial(absl::string_view prefix,
const ChunkMetadata& chunk_metadata,
Message* merged_message) {
uint64_t start_time = Env::Default()->NowMicros();
TF_ASSIGN_OR_RETURN(bool only_contains_pb, OnlyContainsPb(prefix));
if (only_contains_pb) {
return absl::FailedPreconditionError(
absl::StrCat("Attempting to read part of a chunked proto .cpb file, "
"but only found a regular proto: ",
prefix, ".pb"));
}
TF_ASSIGN_OR_RETURN(auto reader,
GetRiegeliReader(absl::StrCat(prefix, ".cpb")));
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
absl::Status s =
ReadFields(chunk_metadata.message(), reader, chunks_info, merged_message);
reader.Close();
uint64_t end_time = Env::Default()->NowMicros();
LOG(INFO) << "Finished reading and merging chunked proto, took "
<< HumanReadableDuration(end_time - start_time) << ".";
return s;
}
absl::Status Merger::ReadPb(const std::string& pb_file,
Message* merged_message) {
uint64_t start_time = Env::Default()->NowMicros();
TF_ASSIGN_OR_RETURN(bool file_exists,
internal::FileExists(Env::Default(), pb_file));
if (!file_exists)
return absl::NotFoundError(absl::StrCat("File not found: ", pb_file));
LOG(INFO) << "Reading binary proto from " << pb_file;
auto ret = ReadBinaryProto(Env::Default(), pb_file, merged_message);
uint64_t end_time = Env::Default()->NowMicros();
LOG(INFO) << "Finished reading binary proto, took "
<< HumanReadableDuration(end_time - start_time) << ".";
return ret;
}
absl::Status Merger::ReadFields(
const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info,
tsl::protobuf::Message* merged_message) {
if (chunked_message.has_chunk_index()) {
TF_ASSIGN_OR_RETURN(
std::string chunk,
ReadChunk(reader, chunks_info[chunked_message.chunk_index()]));
if (!merged_message->MergeFromString(chunk)) {
return absl::FailedPreconditionError(
"Couldn't merge chunk into message.");
}
}
std::vector<ChunkedField> chunked_fields(
chunked_message.chunked_fields().begin(),
chunked_message.chunked_fields().end());
absl::Status sort_status = absl::OkStatus();
std::sort(
chunked_fields.begin(), chunked_fields.end(),
[&sort_status](ChunkedField cf1, ChunkedField cf2) {
int tag_depth =
std::min(cf1.field_tag().size(), cf2.field_tag().size());
for (int depth = 0; depth < tag_depth; ++depth) {
FieldIndex tag1 = cf1.field_tag()[depth];
FieldIndex tag2 = cf2.field_tag()[depth];
if (tag1.has_field() && tag2.has_field()) {
uint32_t field1 = tag1.field();
uint32_t field2 = tag2.field();
if (field1 != field2) return field1 < field2;
} else if (tag1.has_index() && tag2.has_index()) {
uint64_t index1 = tag1.index();
uint64_t index2 = tag2.index();
if (index1 != index2) return index1 < index2;
} else if (tag1.has_map_key() && tag2.has_map_key()) {
return false;
} else {
sort_status = absl::FailedPreconditionError("Field tag mismatch");
return false;
}
}
if (cf1.field_tag().size() == cf2.field_tag().size()) {
return cf1.message().chunk_index() < cf2.message().chunk_index();
}
return cf1.field_tag().size() < cf2.field_tag().size();
});
if (!sort_status.ok()) return sort_status;
for (const auto& chunked_field : chunked_fields) {
absl::Status s = ProcessField(chunked_field, merged_message, chunks_info,
{}, reader, MergerOp::READ);
if (!s.ok()) return s;
}
return absl::OkStatus();
}
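// Editor's note on the sort above: chunked fields are replayed
// parent-before-child (a shared prefix puts the shorter field_tag first) and
// in ascending field-number / list-index order at each depth. Tags that
// differ only in map keys compare as equivalent, and fully equal tags fall
// back to ascending chunk index, e.g.
//   [field:1] < [field:1, index:0] < [field:1, index:2] < [field:2].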
absl::Status Merger::ProcessField(
const ChunkedField& chunked_field, Message* merged_message,
const std::vector<ChunkInfo>& chunks_info,
const std::vector<std::unique_ptr<Message>>& chunks,
riegeli::RecordReader<riegeli::FdReader<>>& reader, MergerOp op) {
std::string chunk;
switch (op) {
case MergerOp::READ: {
TF_ASSIGN_OR_RETURN(
chunk, ReadChunk(reader,
chunks_info[chunked_field.message().chunk_index()]));
break;
}
case MergerOp::MERGE: {
chunk =
chunks[chunked_field.message().chunk_index()]->SerializeAsString();
break;
}
}
if (chunked_field.field_tag().empty()) {
merged_message->MergeFromString(chunk);
return absl::OkStatus();
}
  uint64_t field_index = 0;
Message* curr_message = merged_message;
TF_ASSIGN_OR_RETURN(const std::vector<Field> fields,
GetFieldTypes(chunked_field.field_tag()));
const FieldDescriptor* field_desc = nullptr;
for (const auto& field : fields) {
merged_message = curr_message;
field_desc = merged_message->GetDescriptor()->FindFieldByNumber(
std::get<int>(field.first));
auto res = GetMutableField(merged_message, field);
if (!res.ok()) {
if (!absl::IsNotFound(res.status())) return res.status();
if (field_desc->is_map()) {
TF_RETURN_IF_ERROR(
AddMapEntry(curr_message, field_desc, field.second.value()));
res = GetMutableField(curr_message, field);
} else {
curr_message->GetReflection()->AddMessage(curr_message, field_desc);
res = GetMutableField(curr_message, field);
}
}
auto [parent, mutable_field, mutable_field_index] = res.value();
if (mutable_field->is_repeated() && mutable_field_index != -1) {
field_index = mutable_field_index;
curr_message = parent->GetReflection()->MutableRepeatedMessage(
parent, mutable_field, std::max(0, mutable_field_index));
if (mutable_field->is_map()) {
field_desc = mutable_field->message_type()->FindFieldByNumber(2);
merged_message = curr_message;
curr_message = curr_message->GetReflection()->MutableMessage(
curr_message, field_desc);
}
} else if (mutable_field->type() == FieldDescriptor::Type::TYPE_MESSAGE) {
curr_message =
parent->GetReflection()->MutableMessage(parent, mutable_field);
}
}
const Reflection* reflection = merged_message->GetReflection();
if (field_desc->is_repeated()) {
auto message_callback = [&reflection, &merged_message, &field_index, &op,
&chunks, &chunked_field, &reader, &chunks_info,
&field_desc]() -> absl::Status {
for (int _ = reflection->FieldSize(*merged_message, field_desc);
_ <= field_index; _++) {
reflection->AddMessage(merged_message, field_desc);
}
switch (op) {
case MergerOp::MERGE:
TF_RETURN_IF_ERROR(
Merge(chunks, chunked_field.message(),
reflection->MutableRepeatedMessage(
merged_message, field_desc, field_index)));
break;
case MergerOp::READ:
TF_RETURN_IF_ERROR(
ReadFields(chunked_field.message(), reader, chunks_info,
reflection->MutableRepeatedMessage(
merged_message, field_desc, field_index)));
break;
default:
return absl::InternalError("Encountered unknown MergerOp.");
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(SetRepeatedFieldElement(
merged_message, field_desc, field_index, chunk, message_callback));
} else {
auto message_callback = [&reflection, &merged_message, &op, &chunks,
&chunked_field, &reader, &chunks_info,
&field_desc]() -> absl::Status {
switch (op) {
case MergerOp::MERGE:
TF_RETURN_IF_ERROR(
Merge(chunks, chunked_field.message(),
reflection->MutableMessage(merged_message, field_desc)));
break;
case MergerOp::READ:
TF_RETURN_IF_ERROR(ReadFields(
chunked_field.message(), reader, chunks_info,
reflection->MutableMessage(merged_message, field_desc)));
break;
default:
return absl::InternalError("Encountered unknown MergerOp.");
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(
SetFieldElement(merged_message, field_desc, chunk, message_callback));
}
return absl::OkStatus();
}
} | #include "tensorflow/tools/proto_splitter/merge.h"
#include <array>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/tools/proto_splitter/cc/test_util.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::tools::proto_splitter {
namespace {
inline constexpr std::array kDFSplitTreeChunks = {
"val: \"0\"", "val: \"010\"", "val: \"01020\"",
"val: \"0102030\"", "val: \"0102031\"", "val: \"0102032\"",
"val: \"01021\"", "val: \"0102130\"", "val: \"0102131\"",
"val: \"0102132\""};
inline constexpr std::array kBFSplitTreeChunks = {
"val: \"0\"", "val: \"010\"", "val: \"01020\"",
"val: \"01021\"", "val: \"0102030\"", "val: \"0102031\"",
"val: \"0102032\"", "val: \"0102130\"", "val: \"0102131\"",
"val: \"0102132\""};
TEST(MergeTest, TestReadRiegeliTreeDepthFirst) {
const std::string cpb_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "df-split-tree");
::tensorflow::proto_splitter_testdata::StringNode merged_tree;
TF_ASSERT_OK(Merger::Read(cpb_path, &merged_tree));
const std::string pbtxt_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-tree");
::tensorflow::proto_splitter_testdata::StringNode test_proto;
TF_ASSERT_OK(tsl::ReadTextProto(
tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto));
ASSERT_THAT(merged_tree, EqualsProto(test_proto));
}
TEST(MergeTest, TestReadRiegeliTreeBreadthFirst) {
const std::string cpb_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "bf-split-tree");
::tensorflow::proto_splitter_testdata::StringNode merged_tree;
TF_ASSERT_OK(Merger::Read(cpb_path, &merged_tree));
const std::string pbtxt_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-tree");
::tensorflow::proto_splitter_testdata::StringNode test_proto;
TF_ASSERT_OK(tsl::ReadTextProto(
tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto));
ASSERT_THAT(merged_tree, EqualsProto(test_proto));
}
TEST(MergeTest, TestMergeTreeChunksDepthFirst) {
const std::string path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "df-split-tree");
std::vector<std::unique_ptr<::tsl::protobuf::Message>> chunks;
for (const auto& chunk : kDFSplitTreeChunks) {
::tensorflow::proto_splitter_testdata::StringNode string_node;
::tsl::protobuf::TextFormat::ParseFromString(chunk, &string_node);
std::unique_ptr<::tsl::protobuf::Message> node =
std::make_unique<::tensorflow::proto_splitter_testdata::StringNode>(
string_node);
chunks.push_back(std::move(node));
}
std::string split_tree_metadata;
TF_ASSERT_OK(tsl::ReadFileToString(
tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &split_tree_metadata));
::tensorflow::proto_splitter::ChunkedMessage chunked_message;
::tsl::protobuf::TextFormat::ParseFromString(split_tree_metadata,
&chunked_message);
::tensorflow::proto_splitter_testdata::StringNode merged_tree;
TF_ASSERT_OK(Merger::Merge(chunks, chunked_message, &merged_tree));
const std::string pbtxt_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-tree");
::tensorflow::proto_splitter_testdata::StringNode test_proto;
TF_ASSERT_OK(tsl::ReadTextProto(
tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto));
ASSERT_THAT(merged_tree, EqualsProto(test_proto));
}
TEST(MergeTest, TestMergeTreeChunksBreadthFirst) {
const std::string path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "bf-split-tree");
std::vector<std::unique_ptr<::tsl::protobuf::Message>> chunks;
for (const auto& chunk : kBFSplitTreeChunks) {
::tensorflow::proto_splitter_testdata::StringNode string_node;
::tsl::protobuf::TextFormat::ParseFromString(chunk, &string_node);
std::unique_ptr<::tsl::protobuf::Message> node =
std::make_unique<::tensorflow::proto_splitter_testdata::StringNode>(
string_node);
chunks.push_back(std::move(node));
}
std::string split_tree_metadata;
TF_ASSERT_OK(tsl::ReadFileToString(
tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &split_tree_metadata));
::tensorflow::proto_splitter::ChunkedMessage chunked_message;
::tsl::protobuf::TextFormat::ParseFromString(split_tree_metadata,
&chunked_message);
::tensorflow::proto_splitter_testdata::StringNode merged_tree;
TF_ASSERT_OK(Merger::Merge(chunks, chunked_message, &merged_tree));
const std::string pbtxt_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-tree");
::tensorflow::proto_splitter_testdata::StringNode test_proto;
TF_ASSERT_OK(tsl::ReadTextProto(
tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto));
ASSERT_THAT(merged_tree, EqualsProto(test_proto));
}
TEST(MergeTest, TestReadGraphDefLotsNodes) {
const std::string path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-lots-nodes");
GraphDef merged_graph_def;
TF_ASSERT_OK(Merger::Read(path, &merged_graph_def));
GraphDef test_graph_def;
TF_ASSERT_OK(tsl::ReadTextProto(
tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_graph_def));
ASSERT_THAT(merged_graph_def, EqualsProto(test_graph_def));
}
TEST(MergeTest, TestReadGraphDefLargeNodes) {
const std::string path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-large-nodes");
GraphDef merged_graph_def;
TF_ASSERT_OK(Merger::Read(path, &merged_graph_def));
GraphDef test_graph_def;
TF_ASSERT_OK(tsl::ReadTextProto(
tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_graph_def));
ASSERT_THAT(merged_graph_def, EqualsProto(test_graph_def));
}
TEST(MergeTest, TestReadGraphDefLargeConstant) {
const std::string path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-large-constant");
GraphDef merged_graph_def;
TF_ASSERT_OK(Merger::Read(path, &merged_graph_def));
GraphDef test_graph_def;
TF_ASSERT_OK(tsl::ReadTextProto(
tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_graph_def));
ASSERT_THAT(merged_graph_def, EqualsProto(test_graph_def));
}
TEST(MergeTest, TestReadManyField) {
const std::string path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "many-field");
::tensorflow::proto_splitter_testdata::ManyFields merged_many_field;
TF_ASSERT_OK(Merger::Read(path, &merged_many_field));
::tensorflow::proto_splitter_testdata::ManyFields test_many_field;
TF_ASSERT_OK(tsl::ReadTextProto(
tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_many_field));
ASSERT_THAT(merged_many_field, EqualsProto(test_many_field));
}
TEST(MergeTest, TestReadSavedModel) {
const std::string path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-standard");
SavedModel merged_saved_model;
TF_ASSERT_OK(Merger::Read(path, &merged_saved_model));
SavedModel test_saved_model;
TF_ASSERT_OK(tsl::ReadTextProto(
tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_saved_model));
ASSERT_THAT(merged_saved_model, EqualsProto(test_saved_model));
}
TEST(MergeTest, TestReadChunkedModel) {
const std::string path =
io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
"chunked_saved_model/chunked_model/saved_model");
SavedModel merged_saved_model;
TF_ASSERT_OK(Merger::Read(path, &merged_saved_model));
SavedModel test_saved_model;
TF_ASSERT_OK(tsl::ReadTextProto(
tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_saved_model));
ASSERT_THAT(merged_saved_model, EqualsProto(test_saved_model));
}
TEST(MergeTest, TestReadPartial) {
const std::string path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "many-field");
TF_ASSERT_OK_AND_ASSIGN(auto reader, tools::proto_splitter::GetRiegeliReader(
absl::StrCat(path, ".cpb")));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
::tensorflow::proto_splitter::ChunkMetadata chunk_metadata =
read_metadata.value();
::tensorflow::proto_splitter::ChunkMetadata partial_chunk_metadata;
partial_chunk_metadata.mutable_chunks()->CopyFrom(chunk_metadata.chunks());
partial_chunk_metadata.mutable_message()->set_chunk_index(
chunk_metadata.message().chunk_index());
proto_splitter_testdata::ManyFields merged_many_fields;
TF_ASSERT_OK(
Merger::ReadPartial(path, partial_chunk_metadata, &merged_many_fields));
ASSERT_THAT(merged_many_fields, EqualsProto(R"pb(
map_field_int64 { key: -1345 value: "map_value_-1345" }
)pb"));
}
}
} |
1,281 | cpp | tensorflow/tensorflow | graph_def_splitter | tensorflow/tools/proto_splitter/cc/graph_def_splitter.cc | tensorflow/tools/proto_splitter/cc/graph_def_splitter_test.cc | #ifndef TENSORFLOW_TOOLS_PROTO_SPLITTER_CC_GRAPH_DEF_SPLITTER_H_
#define TENSORFLOW_TOOLS_PROTO_SPLITTER_CC_GRAPH_DEF_SPLITTER_H_
#include "tensorflow/tools/proto_splitter/cc/composable_splitter.h"
namespace tensorflow {
namespace tools::proto_splitter {
class GraphDefSplitter : public ComposableSplitter {
public:
using ComposableSplitter::ComposableSplitter;
absl::Status BuildChunks() override;
};
}
}
#endif
#include "tensorflow/tools/proto_splitter/cc/graph_def_splitter.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/tools/proto_splitter/cc/large_node_splitter.h"
#include "tensorflow/tools/proto_splitter/cc/max_size.h"
#include "tensorflow/tools/proto_splitter/cc/repeated_field_splitter.h"
#include "tensorflow/tools/proto_splitter/cc/size_splitter.h"
#include "tensorflow/tools/proto_splitter/cc/split.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tools::proto_splitter {
namespace {
using namespace std::string_literals;
class ConstantSplitter : public SizeSplitter {
public:
using SizeSplitter::SizeSplitter;
absl::StatusOr<int> BuildChunksReturnSize() override {
NodeDef* node = tsl::protobuf::DynamicCastToGenerated<NodeDef>(message());
std::vector<FieldType> tensor_field = {"attr"s, "value"s, "tensor"s};
std::vector<FieldType> content_field = {"attr"s, "value"s, "tensor"s,
"tensor_content"s};
TF_ASSIGN_OR_RETURN(auto ret, GetMutableField(node, tensor_field));
auto tensor_msg =
ret.parent->GetReflection()->MutableMessage(ret.parent, ret.field);
TensorProto* tensor_proto =
tsl::protobuf::DynamicCastToGenerated<TensorProto>(tensor_msg);
int size_diff;
if (tensor_proto->tensor_content().empty()) {
Tensor t;
if (!t.FromProto(*tensor_proto)) {
return absl::InvalidArgumentError(
"Invalid Const NodeDef.attr[\"value\"].tensor value.");
}
TensorProto container;
t.AsProtoTensorContent(&container);
size_diff = container.tensor_content().size();
auto x = std::make_unique<std::string>(
std::move(*container.mutable_tensor_content()));
auto y = std::make_unique<MessageBytes>(std::move(*x));
TF_RETURN_IF_ERROR(AddChunk(std::move(y), &content_field));
} else {
size_diff = tensor_proto->tensor_content().size();
auto x = std::make_unique<std::string>(
std::move(*tensor_proto->mutable_tensor_content()));
auto y = std::make_unique<MessageBytes>(std::move(*x));
TF_RETURN_IF_ERROR(AddChunk(std::move(y), &content_field));
}
auto dtype = tensor_proto->dtype();
auto tensor_shape = tensor_proto->tensor_shape();
auto version_number = tensor_proto->version_number();
tensor_proto->Clear();
tensor_proto->set_dtype(dtype);
*tensor_proto->mutable_tensor_shape() = tensor_shape;
tensor_proto->set_version_number(version_number);
return size_diff;
}
};
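// Editor's note: ConstantSplitter rewrites a Const node in place. Before:
//   attr["value"].tensor = {dtype, tensor_shape, tensor_content: <big blob>}
// After:
//   attr["value"].tensor = {dtype, tensor_shape, version_number}
// and the raw bytes become a standalone chunk keyed by the field tag
//   attr -> "value" -> tensor -> tensor_content,
// which lets the Merger write the bytes straight back into that field. The
// Tensor round-trip in the empty-tensor_content branch normalizes repeated
// *_val fields into a single tensor_content string first.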
class ConstantSplitterFactory : public SizeSplitterFactory {
public:
using SizeSplitterFactory::SizeSplitterFactory;
absl::StatusOr<std::unique_ptr<SizeSplitter>> CreateSplitter(
tsl::protobuf::Message* message, ComposableSplitterBase* parent_splitter,
std::vector<FieldType>* fields_in_parent, int size) override {
if (size < GetMaxSize()) return nullptr;
NodeDef* node = tsl::protobuf::DynamicCastToGenerated<NodeDef>(message);
if (node->op() != "Const")
return absl::UnimplementedError(absl::StrCat(
"Currently only able to split 'Const' nodes that are larger than the "
"2GB maximum proto size. Got node of type '",
node->op(), "' with size: ", size, "."));
ConstantSplitter* splitter =
new ConstantSplitter(message, parent_splitter, fields_in_parent);
return absl::WrapUnique(splitter);
}
};
class FunctionDefSplitter : public SizeSplitter {
public:
using SizeSplitter::SizeSplitter;
absl::StatusOr<int> BuildChunksReturnSize() override {
size_t current_size = GetInitialSize();
uint64_t max_size = GetMaxSize();
std::vector<FieldType> fields = {};
if (LARGE_SIZE_CHECK(current_size, max_size) && current_size < max_size) {
auto splitter = LargeNodeSplitter<FunctionDef>(message(), this, &fields);
splitter.SetInitialSize(current_size);
return splitter.BuildChunksReturnSize();
} else if (current_size > max_size) {
ConstantSplitterFactory constant_splitter_factory;
LargeNodeSplitterFactory<NodeDef> large_node_splitter_factory;
std::vector<SizeSplitterFactory*> factories = {
&constant_splitter_factory, &large_node_splitter_factory};
auto ret = RepeatedFieldSplitters<FunctionDef, NodeDef>::Create(
message(), this, &fields, "node_def"s, &factories);
if (!ret.ok()) return ret.status();
auto splitter = ret.value();
return splitter.BuildChunksReturnSize();
}
return 0;
}
};
class FunctionDefSplitterFactory : public SizeSplitterFactory {
public:
using SizeSplitterFactory::SizeSplitterFactory;
absl::StatusOr<std::unique_ptr<SizeSplitter>> CreateSplitter(
tsl::protobuf::Message* message, ComposableSplitterBase* parent_splitter,
std::vector<FieldType>* fields_in_parent, int size) override {
FunctionDefSplitter* splitter =
new FunctionDefSplitter(message, parent_splitter, fields_in_parent);
return absl::WrapUnique(splitter);
}
};
}
absl::Status GraphDefSplitter::BuildChunks() {
TF_RETURN_IF_ERROR(SetMessageAsBaseChunk());
GraphDef* g = tsl::protobuf::DynamicCastToGenerated<GraphDef>(message());
uint64_t max_size = GetMaxSize();
size_t graph_size = GetInitialSize();
if (graph_size < max_size) return absl::OkStatus();
std::vector<FieldType> field_in_parent = {};
ConstantSplitterFactory constant_splitter_factory;
LargeNodeSplitterFactory<NodeDef> large_node_splitter_factory;
std::vector<SizeSplitterFactory*> factories = {&constant_splitter_factory,
&large_node_splitter_factory};
auto node_splitter_ret = RepeatedFieldSplitters<GraphDef, NodeDef>::Create(
g, this, &field_in_parent, "node"s, &factories);
if (!node_splitter_ret.ok()) return node_splitter_ret.status();
auto node_splitter = node_splitter_ret.value();
FunctionDefSplitterFactory function_splitter_factory;
std::vector<FieldType> library_field = {"library"s};
std::vector<SizeSplitterFactory*> fn_factories = {&function_splitter_factory};
auto library_splitter_ret =
RepeatedFieldSplitters<FunctionDefLibrary, FunctionDef>::Create(
g->mutable_library(), this, &library_field, "function"s,
&fn_factories);
if (!library_splitter_ret.ok()) return library_splitter_ret.status();
auto library_splitter = library_splitter_ret.value();
size_t library_size = g->library().ByteSizeLong();
library_splitter.SetInitialSize(library_size);
size_t approx_node_size = graph_size - library_size;
node_splitter.SetInitialSize(approx_node_size);
if (library_size > approx_node_size) {
TF_ASSIGN_OR_RETURN(int size_diff,
library_splitter.BuildChunksReturnSize());
library_size -= size_diff;
if (approx_node_size + library_size > max_size) {
TF_ASSIGN_OR_RETURN(int size_diff, node_splitter.BuildChunksReturnSize());
approx_node_size -= size_diff;
}
} else {
TF_ASSIGN_OR_RETURN(int size_diff, node_splitter.BuildChunksReturnSize());
approx_node_size -= size_diff;
if (approx_node_size + library_size > max_size) {
TF_ASSIGN_OR_RETURN(int size_diff,
library_splitter.BuildChunksReturnSize());
library_size -= size_diff;
}
}
if (g->ByteSizeLong() > max_size) {
LargeNodeSplitter<FunctionDefLibrary> entire_library_splitter(
g->mutable_library(), this, &library_field);
int index = 1;
entire_library_splitter.SetChunkIndex(&index);
TF_RETURN_IF_ERROR(entire_library_splitter.BuildChunks());
}
return absl::OkStatus();
}
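// Editor's note on the budgeting above: the graph is treated as two halves,
// node defs (graph_size - library_size) and the function library. The larger
// half is split first; if the running total still exceeds max_size, the
// other half is split too. If the GraphDef *still* serializes above
// max_size, the entire FunctionDefLibrary is moved out as one chunk,
// inserted at chunk index 1, directly after the base GraphDef chunk.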
}
} | #include "tensorflow/tools/proto_splitter/cc/graph_def_splitter.h"
#include <cstdint>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/tools/proto_splitter/cc/max_size.h"
#include "tensorflow/tools/proto_splitter/cc/test_util.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tools::proto_splitter {
namespace {
using ::tensorflow::proto_splitter::ChunkedMessage;
#define EXPECT_CHUNK_SIZES(chunks, max_size) \
do { \
for (auto chunk : *chunks) { \
if (std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>( \
chunk)) { \
EXPECT_LE(std::get<std::shared_ptr<tsl::protobuf::Message>>(chunk) \
->ByteSizeLong(), \
max_size); \
} else if (std::holds_alternative<tsl::protobuf::Message*>(chunk)) { \
EXPECT_LE(std::get<tsl::protobuf::Message*>(chunk)->ByteSizeLong(), \
max_size); \
} \
} \
} while (0)
TEST(GraphDefSplitterTest, TestLargeConstant) {
GraphDef proto;
const std::string graph_def_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-large-constant.pb");
int64_t max_size = 500;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
graph_def_path, &proto));
EXPECT_GE(proto.ByteSizeLong(), GetMaxSize());
std::string large_constant_1, large_constant_2;
const std::variant<std::string, absl::Cord>& tensor_constant_1 =
proto.node(2).attr().at("value").tensor().tensor_content();
const std::variant<std::string, absl::Cord>& tensor_constant_2 =
proto.node(4).attr().at("value").tensor().tensor_content();
if (std::holds_alternative<std::string>(tensor_constant_1)) {
large_constant_1 = std::get<std::string>(tensor_constant_1);
} else {
absl::CopyCordToString(std::get<absl::Cord>(tensor_constant_1),
&large_constant_1);
}
if (std::holds_alternative<std::string>(tensor_constant_2)) {
large_constant_2 = std::get<std::string>(tensor_constant_2);
} else {
absl::CopyCordToString(std::get<absl::Cord>(tensor_constant_2),
&large_constant_2);
}
GraphDefSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
ChunkedMessage* chunked_message = x.chunked_message;
ASSERT_NE(chunked_message, nullptr);
EXPECT_THAT(*chunked_message,
EqualsProto(R"pb(chunk_index: 0
chunked_fields {
field_tag { field: 1 }
field_tag { index: 2 }
field_tag { field: 5 }
field_tag { map_key { s: "value" } }
field_tag { field: 8 }
field_tag { field: 4 }
message { chunk_index: 1 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 4 }
field_tag { field: 5 }
field_tag { map_key { s: "value" } }
field_tag { field: 8 }
field_tag { field: 4 }
message { chunk_index: 2 }
})pb"));
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_CHUNK_SIZES(chunks, max_size);
EXPECT_THAT((*chunks)[1],
::testing::VariantWith<std::string>(large_constant_1));
EXPECT_THAT((*chunks)[2],
::testing::VariantWith<std::string>(large_constant_2));
}
TEST(GraphDefSplitterTest, TestLargeNodes) {
GraphDef proto;
const std::string graph_def_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-large-nodes.pb");
int64_t max_size = 200;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
graph_def_path, &proto));
  EXPECT_GE(proto.ByteSizeLong(), GetMaxSize());
NodeDef node_1 = proto.node(1);
NodeDef node_2 = proto.node(2);
NodeDef node_3 = proto.node(3);
NodeDef node_5 = proto.node(5);
GraphDefSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
ChunkedMessage* chunked_message = x.chunked_message;
ASSERT_NE(chunked_message, nullptr);
EXPECT_THAT(*chunked_message, EqualsProto(R"pb(chunk_index: 0
chunked_fields {
field_tag { field: 1 }
field_tag { index: 1 }
message { chunk_index: 1 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 2 }
message { chunk_index: 2 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 3 }
message { chunk_index: 3 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 5 }
message { chunk_index: 4 }
})pb"));
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_CHUNK_SIZES(chunks, max_size);
EXPECT_TRUE(std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>(
(*chunks)[1]));
EXPECT_TRUE(std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>(
(*chunks)[2]));
EXPECT_TRUE(std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>(
(*chunks)[3]));
EXPECT_TRUE(std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>(
(*chunks)[4]));
EXPECT_THAT(
*std::get<std::shared_ptr<tsl::protobuf::Message>>((*chunks)[1]).get(),
EqualsProto(node_1));
EXPECT_THAT(
*std::get<std::shared_ptr<tsl::protobuf::Message>>((*chunks)[2]).get(),
EqualsProto(node_2));
EXPECT_THAT(
*std::get<std::shared_ptr<tsl::protobuf::Message>>((*chunks)[3]).get(),
EqualsProto(node_3));
EXPECT_THAT(
*std::get<std::shared_ptr<tsl::protobuf::Message>>((*chunks)[4]).get(),
EqualsProto(node_5));
}
TEST(GraphDefSplitterTest, TestLotsNodes) {
GraphDef proto;
const std::string graph_def_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "split-lots-nodes.pb");
int64_t max_size = 96 * 5;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
graph_def_path, &proto));
  EXPECT_GE(proto.ByteSizeLong(), GetMaxSize());
int expected_node_size = proto.node_size();
GraphDefSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
ChunkedMessage* chunked_message = x.chunked_message;
ASSERT_NE(chunked_message, nullptr);
EXPECT_THAT(
*chunked_message,
EqualsProto(R"pb(chunk_index: 0
chunked_fields { message { chunk_index: 1 } }
chunked_fields { message { chunk_index: 2 } }
chunked_fields { message { chunk_index: 3 } }
chunked_fields { message { chunk_index: 4 } })pb"));
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_CHUNK_SIZES(chunks, max_size);
int actual_node_size = 0;
for (MessageBytes& chunk : *chunks) {
GraphDef* message = nullptr;
if (std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>(
chunk)) {
message = tsl::protobuf::DynamicCastToGenerated<GraphDef>(
std::get<std::shared_ptr<tsl::protobuf::Message>>(chunk).get());
} else if (std::holds_alternative<tsl::protobuf::Message*>(chunk)) {
message = tsl::protobuf::DynamicCastToGenerated<GraphDef>(
std::get<tsl::protobuf::Message*>(chunk));
} else {
EXPECT_FALSE(std::holds_alternative<std::string>(chunk));
}
actual_node_size += message->node_size();
}
EXPECT_EQ(actual_node_size, expected_node_size);
}
TEST(GraphDefSplitterTest, TestFunctionLotsOfNodes) {
GraphDef proto;
const std::string graph_def_path = io::JoinPath(
testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"function-lots-of-nodes.pb");
int64_t max_size = 500;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
graph_def_path, &proto));
  EXPECT_GE(proto.ByteSizeLong(), GetMaxSize());
GraphDefSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_CHUNK_SIZES(chunks, max_size);
}
TEST(GraphDefSplitterTest, TestFunctionLargeNodes) {
GraphDef proto;
const std::string graph_def_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"tools/proto_splitter/testdata", "function-large-nodes.pb");
int64_t max_size = 200;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
graph_def_path, &proto));
  EXPECT_GE(proto.ByteSizeLong(), GetMaxSize());
GraphDefSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_CHUNK_SIZES(chunks, max_size);
}
TEST(GraphDefSplitterTest, TestGraphAndFunction) {
GraphDef proto;
const std::string graph_def_path = io::JoinPath(
testing::TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"graph-def-and-function.pb");
int64_t max_size = 200;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
graph_def_path, &proto));
  EXPECT_GE(proto.ByteSizeLong(), GetMaxSize());
GraphDefSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_CHUNK_SIZES(chunks, max_size);
TF_ASSERT_OK(splitter.Write("/tmp/hoi"));
}
}
}
} |
1,282 | cpp | tensorflow/tensorflow | saved_model_splitter | tensorflow/tools/proto_splitter/cc/saved_model_splitter.cc | tensorflow/tools/proto_splitter/cc/saved_model_splitter_test.cc | #ifndef TENSORFLOW_TOOLS_PROTO_SPLITTER_CC_SAVED_MODEL_SPLITTER_H_
#define TENSORFLOW_TOOLS_PROTO_SPLITTER_CC_SAVED_MODEL_SPLITTER_H_
#include "tensorflow/tools/proto_splitter/cc/composable_splitter.h"
namespace tensorflow {
namespace tools::proto_splitter {
class SavedModelSplitter : public ComposableSplitter {
public:
using ComposableSplitter::ComposableSplitter;
absl::Status BuildChunks() override;
};
}
}
#endif
#include "tensorflow/tools/proto_splitter/cc/saved_model_splitter.h"
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/tools/proto_splitter/cc/graph_def_splitter.h"
#include "tensorflow/tools/proto_splitter/cc/large_node_splitter.h"
#include "tensorflow/tools/proto_splitter/cc/max_size.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace tools::proto_splitter {
using namespace std::string_literals;
absl::Status SavedModelSplitter::BuildChunks() {
TF_RETURN_IF_ERROR(SetMessageAsBaseChunk());
SavedModel* sm = tsl::protobuf::DynamicCastToGenerated<SavedModel>(message());
int max_size = GetMaxSize();
if (GetInitialSize() < max_size) return absl::OkStatus();
std::vector<FieldType> fields_to_graph_def = {"meta_graphs"s, 0,
"graph_def"s};
GraphDefSplitter graph_def_splitter(
sm->mutable_meta_graphs(0)->mutable_graph_def(), this,
&fields_to_graph_def);
TF_RETURN_IF_ERROR(graph_def_splitter.BuildChunks());
if (sm->ByteSizeLong() < max_size) return absl::OkStatus();
LargeNodeSplitter<GraphDef> entire_graph_splitter(
sm->mutable_meta_graphs(0)->mutable_graph_def(), this,
&fields_to_graph_def);
int index = 1;
entire_graph_splitter.SetChunkIndex(&index);
TF_RETURN_IF_ERROR(entire_graph_splitter.BuildChunks());
return absl::OkStatus();
}
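// Editor's sketch (assumed usage): splitting a SavedModel whose serialized
// size exceeds the 2GB proto limit. Chunking is attempted on the GraphDef's
// contents first; if the model is still too large, the whole GraphDef is
// moved out as a single chunk.
//
//   SavedModel sm = ...;  // loaded elsewhere
//   SavedModelSplitter splitter(&sm);
//   TF_RETURN_IF_ERROR(splitter.Write("model/saved_model"));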
}
} | #include "tensorflow/tools/proto_splitter/cc/saved_model_splitter.h"
#include <cstdint>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/tools/proto_splitter/cc/max_size.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tools::proto_splitter {
namespace {
#define EXPECT_CHUNK_SIZES(chunks, max_size) \
do { \
for (auto chunk : *chunks) { \
if (std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>( \
chunk)) { \
EXPECT_LE(std::get<std::shared_ptr<tsl::protobuf::Message>>(chunk) \
->ByteSizeLong(), \
max_size); \
} else if (std::holds_alternative<tsl::protobuf::Message*>(chunk)) { \
EXPECT_LE(std::get<tsl::protobuf::Message*>(chunk)->ByteSizeLong(), \
max_size); \
} \
} \
} while (0)
std::string NonChunkedSavedModel() {
return io::JoinPath(testing::TensorFlowSrcRoot(), "cc", "saved_model",
"testdata", "chunked_saved_model", "non_chunked_model",
"saved_model.pb");
}
TEST(SavedModelSplitterTest, TestSplit) {
SavedModel proto;
int64_t max_size = 80000;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
NonChunkedSavedModel(), &proto));
EXPECT_GE(proto.ByteSizeLong(), GetMaxSize());
SavedModelSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_EQ(2, chunks->size());
EXPECT_CHUNK_SIZES(chunks, max_size);
}
}
}
} |
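// Editor's sketch (not part of the dataset row above): a minimal driver that
// mirrors the Split() call from the unit test. The 1 MiB budget, the name
// SplitAndCount, and the use of TF_ASSIGN_OR_RETURN are illustrative
// assumptions; only SavedModelSplitter, DebugSetMaxSize(), and Split() are
// taken from the code above.
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/tools/proto_splitter/cc/max_size.h"
#include "tensorflow/tools/proto_splitter/cc/saved_model_splitter.h"
#include "tsl/platform/statusor.h"

namespace tensorflow::tools::proto_splitter {

absl::Status SplitAndCount(SavedModel* model, int* num_chunks) {
  DebugSetMaxSize(1 << 20);  // Assumed 1 MiB per-chunk budget.
  SavedModelSplitter splitter(model);
  TF_ASSIGN_OR_RETURN(auto result, splitter.Split());
  *num_chunks = static_cast<int>(result.chunks->size());
  return absl::OkStatus();
}

}  // namespace tensorflow::tools::proto_splitter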
1,283 | cpp | tensorflow/tensorflow | partial_run_mgr | tensorflow/core/distributed_runtime/partial_run_mgr.cc | tensorflow/core/distributed_runtime/partial_run_mgr_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_PARTIAL_RUN_MGR_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_PARTIAL_RUN_MGR_H_
#include <unordered_map>
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
class PartialRunMgr {
public:
bool FindOrCreate(int step_id, CancellationManager** cancellation_manager);
void ExecutorDone(int step_id, const Status& executor_status);
void PartialRunDone(int step_id, StatusCallback done, const Status& status);
private:
struct PartialRunState {
std::unique_ptr<CancellationManager> cancellation_manager;
bool executor_done = false;
StatusCallback final_callback = nullptr;
Status final_status;
};
mutex mu_;
std::unordered_map<int, std::unique_ptr<PartialRunState>>
step_id_to_partial_run_ TF_GUARDED_BY(mu_);
};
}
#endif
#include "tensorflow/core/distributed_runtime/partial_run_mgr.h"
namespace tensorflow {
bool PartialRunMgr::FindOrCreate(int step_id,
CancellationManager** cancellation_manager) {
mutex_lock l(mu_);
auto it = step_id_to_partial_run_.find(step_id);
if (it != step_id_to_partial_run_.end()) {
*cancellation_manager = it->second->cancellation_manager.get();
return false;
}
std::unique_ptr<PartialRunState> partial_run =
std::make_unique<PartialRunState>();
partial_run->cancellation_manager = std::make_unique<CancellationManager>();
*cancellation_manager = partial_run->cancellation_manager.get();
step_id_to_partial_run_[step_id] = std::move(partial_run);
return true;
}
void PartialRunMgr::ExecutorDone(int step_id, const Status& executor_status) {
StatusCallback done;
Status callback_status;
{
mutex_lock l(mu_);
auto run_it = step_id_to_partial_run_.find(step_id);
if (run_it == step_id_to_partial_run_.end()) {
return;
}
done = std::move(run_it->second->final_callback);
if (!executor_status.ok()) {
run_it->second->final_status = executor_status;
}
callback_status = run_it->second->final_status;
run_it->second->executor_done = true;
}
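  // `done` is non-null only if PartialRunDone already ran for this step;
  // invoking it outside the lock ensures it fires exactly once.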
if (done != nullptr) {
done(callback_status);
mutex_lock l(mu_);
step_id_to_partial_run_.erase(step_id);
}
}
void PartialRunMgr::PartialRunDone(int step_id, StatusCallback done,
const Status& status) {
Status callback_status;
{
mutex_lock l(mu_);
auto run_it = step_id_to_partial_run_.find(step_id);
if (run_it == step_id_to_partial_run_.end()) {
return;
}
run_it->second->final_status.Update(status);
if (!run_it->second->executor_done) {
run_it->second->final_callback = std::move(done);
return;
}
callback_status = run_it->second->final_status;
}
done(callback_status);
mutex_lock l(mu_);
step_id_to_partial_run_.erase(step_id);
}
} | #include "tensorflow/core/distributed_runtime/partial_run_mgr.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(PartialRunMgrFindOrCreate, Create) {
PartialRunMgr partial_run_mgr;
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &cancellation_manager);
EXPECT_TRUE(cancellation_manager != nullptr);
}
TEST(PartialRunMgrFindOrCreate, Find) {
PartialRunMgr partial_run_mgr;
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &cancellation_manager);
CancellationManager* found_cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &found_cancellation_manager);
EXPECT_EQ(cancellation_manager, found_cancellation_manager);
}
TEST(PartialRunMgrFindOrCreate, NewCreate) {
PartialRunMgr partial_run_mgr;
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &cancellation_manager);
int new_step_id = 2;
CancellationManager* new_cancellation_manager;
partial_run_mgr.FindOrCreate(new_step_id, &new_cancellation_manager);
EXPECT_NE(cancellation_manager, new_cancellation_manager);
}
TEST(PartialRunMgr, PartialRunRemoved) {
PartialRunMgr partial_run_mgr;
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &cancellation_manager);
int called = 0;
partial_run_mgr.PartialRunDone(
step_id, [&called](Status status) { called++; }, absl::OkStatus());
partial_run_mgr.ExecutorDone(step_id, absl::OkStatus());
partial_run_mgr.PartialRunDone(
step_id, [&called](Status status) { called++; }, absl::OkStatus());
partial_run_mgr.ExecutorDone(step_id, absl::OkStatus());
EXPECT_EQ(1, called);
}
struct StatusTestParam {
Status executor_status;
Status partial_run_status;
Status expected_status;
};
class StatusPropagationTest : public ::testing::TestWithParam<StatusTestParam> {
protected:
PartialRunMgr partial_run_mgr_;
Notification invoked_;
Status status_;
void set_status(const Status& status) {
status_ = status;
invoked_.Notify();
}
Status status() {
invoked_.WaitForNotification();
return status_;
}
};
TEST_P(StatusPropagationTest, ExecutorDoneFirst) {
StatusTestParam param = GetParam();
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr_.FindOrCreate(step_id, &cancellation_manager);
partial_run_mgr_.ExecutorDone(step_id, param.executor_status);
partial_run_mgr_.PartialRunDone(step_id,
[this](Status status) { set_status(status); },
param.partial_run_status);
EXPECT_EQ(status(), param.expected_status);
}
TEST_P(StatusPropagationTest, PartialRunDoneFirst) {
StatusTestParam param = GetParam();
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr_.FindOrCreate(step_id, &cancellation_manager);
partial_run_mgr_.PartialRunDone(step_id,
[this](Status status) { set_status(status); },
param.partial_run_status);
partial_run_mgr_.ExecutorDone(step_id, param.executor_status);
EXPECT_EQ(status(), param.expected_status);
}
Status ExecutorError() { return errors::Internal("executor error"); }
Status PartialRunError() { return errors::Internal("partial run error"); }
INSTANTIATE_TEST_SUITE_P(
PartialRunMgr, StatusPropagationTest,
::testing::Values(
StatusTestParam{absl::OkStatus(), absl::OkStatus(), absl::OkStatus()},
StatusTestParam{ExecutorError(), absl::OkStatus(), ExecutorError()},
StatusTestParam{absl::OkStatus(), PartialRunError(), PartialRunError()},
StatusTestParam{ExecutorError(), PartialRunError(), ExecutorError()}));
}
} |
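// Editor's sketch of the intended handshake, assuming only the class above:
// whichever of PartialRunDone and ExecutorDone arrives second triggers the
// single invocation of the client callback with the merged status. The step
// id and log message are illustrative.
#include "tensorflow/core/distributed_runtime/partial_run_mgr.h"
#include "tensorflow/core/platform/logging.h"

void PartialRunHandshakeSketch() {
  tensorflow::PartialRunMgr mgr;
  tensorflow::CancellationManager* cancel_mgr = nullptr;
  const int step_id = 7;

  // First call creates the per-step state and returns its manager.
  mgr.FindOrCreate(step_id, &cancel_mgr);

  // Client finishes first: the callback is stashed, not run yet.
  mgr.PartialRunDone(
      step_id,
      [](const tensorflow::Status& s) { LOG(INFO) << "final status: " << s; },
      absl::OkStatus());

  // Executor finishes second: the stashed callback fires exactly once and
  // the per-step state is erased.
  mgr.ExecutorDone(step_id, absl::OkStatus());
}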
1,284 | cpp | tensorflow/tensorflow | recent_request_ids | tensorflow/core/distributed_runtime/recent_request_ids.cc | tensorflow/core/distributed_runtime/recent_request_ids_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RECENT_REQUEST_IDS_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RECENT_REQUEST_IDS_H_
#include <string>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
class RecentRequestIds {
public:
explicit RecentRequestIds(int num_tracked_request_ids, int num_shards = 1);
Status TrackUnique(int64_t request_id, const string& method_name,
const protobuf::Message& request);
template <typename RequestWrapper>
Status TrackUnique(int64_t request_id, const string& method_name,
const RequestWrapper* wrapper);
private:
bool Insert(int64_t request_id);
struct IndexBucket {
mutex mu;
int next_index TF_GUARDED_BY(mu) = 0;
std::vector<int64_t> circular_buffer TF_GUARDED_BY(mu);
absl::flat_hash_set<int64_t> set TF_GUARDED_BY(mu);
};
std::vector<IndexBucket> index_buckets_;
};
template <typename RequestWrapper>
Status RecentRequestIds::TrackUnique(int64_t request_id,
const string& method_name,
const RequestWrapper* wrapper) {
if (Insert(request_id)) {
return absl::OkStatus();
} else {
return errors::Aborted("The same ", method_name,
" request was received twice. ",
wrapper->ToProto().ShortDebugString());
}
}
}
#endif
#include "tensorflow/core/distributed_runtime/recent_request_ids.h"
#include <utility>
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
RecentRequestIds::RecentRequestIds(int num_tracked_request_ids, int num_shards)
: index_buckets_(num_shards > 0 ? num_shards : 1) {
DCHECK(num_tracked_request_ids >= num_shards);
const int per_bucket_size = num_tracked_request_ids / index_buckets_.size();
for (auto& bucket : index_buckets_) {
mutex_lock l(bucket.mu);
bucket.circular_buffer.resize(per_bucket_size);
bucket.set.reserve(per_bucket_size);
}
}
bool RecentRequestIds::Insert(int64_t request_id) {
if (request_id == 0) {
return true;
}
const int bucket_index = request_id % index_buckets_.size();
auto& bucket = index_buckets_[bucket_index];
mutex_lock l(bucket.mu);
const bool inserted = bucket.set.insert(request_id).second;
if (!inserted) {
return false;
}
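  // Evict the oldest id tracked by this shard (initially 0, so the erase is
  // a no-op) to keep the circular buffer and the set at a fixed size.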
bucket.set.erase(bucket.circular_buffer[bucket.next_index]);
bucket.circular_buffer[bucket.next_index] = request_id;
bucket.next_index = (bucket.next_index + 1) % bucket.circular_buffer.size();
return true;
}
Status RecentRequestIds::TrackUnique(int64_t request_id,
const string& method_name,
const protobuf::Message& request) {
if (Insert(request_id)) {
return absl::OkStatus();
} else {
return errors::Aborted("The same ", method_name,
" request was received twice. ",
request.ShortDebugString());
}
}
} | #include "tensorflow/core/distributed_runtime/recent_request_ids.h"
#include <algorithm>
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
Status TrackUnique(int64_t request_id, RecentRequestIds* recent_request_ids) {
RecvTensorRequest request;
request.set_request_id(request_id);
return recent_request_ids->TrackUnique(request_id, "recent_request_ids_test",
request);
}
TEST(RecentRequestIds, Zero) {
RecentRequestIds recent_request_ids(1);
EXPECT_TRUE(TrackUnique(0, &recent_request_ids).ok());
EXPECT_TRUE(TrackUnique(0, &recent_request_ids).ok());
EXPECT_TRUE(TrackUnique(0, &recent_request_ids).ok());
}
TEST(RecentRequestIds, Unordered) {
RecentRequestIds recent_request_ids(6);
std::vector<int64_t> numbers = {53754, 23351, 164101, 7476,
162432, 130761, 164102};
for (int i = 0; i < 6; ++i) {
TF_EXPECT_OK(TrackUnique(numbers[i], &recent_request_ids));
for (int j = 0; j <= i; ++j) {
EXPECT_FALSE(TrackUnique(numbers[j], &recent_request_ids).ok())
<< "i=" << i << " j=" << j;
}
}
TF_EXPECT_OK(TrackUnique(numbers[6], &recent_request_ids));
for (int i = 1; i < 7; ++i) {
EXPECT_FALSE(TrackUnique(numbers[i], &recent_request_ids).ok())
<< "i=" << i;
}
TF_EXPECT_OK(TrackUnique(numbers[0], &recent_request_ids));
}
void TestOrdered(int num_request_ids, int num_shards) {
RecentRequestIds recent_request_ids(num_request_ids, num_shards);
for (int i = 1; i < 101; ++i) {
TF_EXPECT_OK(TrackUnique(i, &recent_request_ids));
    for (int j = std::max(1, i - num_request_ids / num_shards + 1); j <= i;
         ++j) {
EXPECT_FALSE(TrackUnique(j, &recent_request_ids).ok())
<< "i=" << i << " j=" << j;
}
}
}
TEST(RecentRequestIds, Ordered2Shard1) { TestOrdered(2, 1); }
TEST(RecentRequestIds, Ordered3Shard1) { TestOrdered(3, 1); }
TEST(RecentRequestIds, Ordered4Shard1) { TestOrdered(4, 1); }
TEST(RecentRequestIds, Ordered5Shard1) { TestOrdered(5, 1); }
TEST(RecentRequestIds, Ordered10Shard3) { TestOrdered(10, 3); }
TEST(RecentRequestIds, Ordered11Shard3) { TestOrdered(11, 3); }
TEST(RecentRequestIds, Ordered12Shard4) { TestOrdered(12, 4); }
TEST(RecentRequestIds, Ordered100Shard8) { TestOrdered(100, 8); }
static void BM_TrackUnique(::testing::benchmark::State& state) {
RecentRequestIds recent_request_ids(100000);
RecvTensorRequest request;
for (auto s : state) {
TF_CHECK_OK(recent_request_ids.TrackUnique(GetUniqueRequestId(),
"BM_TrackUnique", request));
}
}
BENCHMARK(BM_TrackUnique);
} |
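// Editor's sketch of duplicate detection, assuming only the classes above:
// the first delivery of a request id is accepted, and a retransmitted
// duplicate is rejected with an ABORTED status. The capacity and shard
// counts are illustrative.
#include <cstdint>
#include "tensorflow/core/distributed_runtime/recent_request_ids.h"
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/protobuf/worker.pb.h"

void DeduplicationSketch() {
  tensorflow::RecentRequestIds recent_ids(/*num_tracked_request_ids=*/1024,
                                          /*num_shards=*/4);
  tensorflow::RecvTensorRequest req;
  const int64_t id = tensorflow::GetUniqueRequestId();
  req.set_request_id(id);

  TF_CHECK_OK(recent_ids.TrackUnique(id, "RecvTensor", req));  // accepted
  CHECK(!recent_ids.TrackUnique(id, "RecvTensor", req).ok());  // duplicate
}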
1,285 | cpp | tensorflow/tensorflow | request_id | tensorflow/core/distributed_runtime/request_id.cc | tensorflow/core/distributed_runtime/request_id_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_REQUEST_ID_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_REQUEST_ID_H_
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/random.h"
namespace tensorflow {
int64_t GetUniqueRequestId();
}
#endif
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
int64_t GetUniqueRequestId() {
int64_t request_id = 0;
while (request_id == 0) {
request_id = tsl::random::ThreadLocalNew64();
}
return request_id;
}
} | #include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(GetUniqueRequestId, Basic) {
for (int i = 0; i < 1000000; ++i) {
EXPECT_NE(GetUniqueRequestId(), 0);
}
}
} |
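// Editor's note: GetUniqueRequestId() never returns 0, and RecentRequestIds
// above treats 0 as "not tracked", so ids produced this way are always
// eligible for duplicate detection. A two-line sketch of the pairing:
//
//   tensorflow::RecvTensorRequest request;
//   request.set_request_id(tensorflow::GetUniqueRequestId());  // never 0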
1,286 | cpp | tensorflow/tensorflow | tensor_coding | tensorflow/core/platform/tensor_coding.cc | tensorflow/core/distributed_runtime/tensor_coding_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_TENSOR_CODING_H_
#define TENSORFLOW_CORE_PLATFORM_TENSOR_CODING_H_
#include <string>
#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace port {
void AssignRefCounted(StringPiece src, core::RefCounted* obj, std::string* out);
inline void CopyToArray(const std::string& src, char* dst) {
memcpy(dst, src.data(), src.size());
}
inline void CopySubrangeToArray(const std::string& src, size_t pos, size_t n,
char* dst) {
if (pos >= src.size()) return;
memcpy(dst, src.data() + pos, std::min(n, src.size() - pos));
}
void EncodeStringList(const tstring* strings, int64_t n, std::string* out);
bool DecodeStringList(const std::string& src, tstring* strings, int64_t n);
void CopyFromArray(std::string* s, const char* base, size_t bytes);
class StringListEncoder {
public:
virtual ~StringListEncoder() = default;
virtual void Append(const protobuf::MessageLite& m) = 0;
virtual void Append(const std::string& s) = 0;
virtual void Finalize() = 0;
};
class StringListDecoder {
public:
virtual ~StringListDecoder() = default;
virtual bool ReadSizes(std::vector<uint32>* sizes) = 0;
virtual const char* Data(uint32 size) = 0;
};
std::unique_ptr<StringListEncoder> NewStringListEncoder(string* out);
std::unique_ptr<StringListDecoder> NewStringListDecoder(const string& in);
#if defined(TENSORFLOW_PROTOBUF_USES_CORD)
void AssignRefCounted(StringPiece src, core::RefCounted* obj, absl::Cord* out);
inline void CopyToArray(const absl::Cord& src, char* dst) {
src.CopyToArray(dst);
}
inline void CopySubrangeToArray(const absl::Cord& src, int64_t pos, int64_t n,
char* dst) {
src.Subcord(pos, n).CopyToArray(dst);
}
void EncodeStringList(const tstring* strings, int64_t n, absl::Cord* out);
bool DecodeStringList(const absl::Cord& src, std::string* strings, int64_t n);
bool DecodeStringList(const absl::Cord& src, tstring* strings, int64_t n);
void CopyFromArray(absl::Cord* c, const char* base, size_t bytes);
std::unique_ptr<StringListEncoder> NewStringListEncoder(absl::Cord* out);
std::unique_ptr<StringListDecoder> NewStringListDecoder(const absl::Cord& in);
#endif
}
}
#endif
#include "tensorflow/core/platform/tensor_coding.h"
#include <vector>
#include "tensorflow/core/platform/coding.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#if defined(TENSORFLOW_PROTOBUF_USES_CORD)
#include "strings/cord_varint.h"
#endif
namespace tensorflow {
namespace port {
void AssignRefCounted(StringPiece src, core::RefCounted* obj, string* out) {
out->assign(src.data(), src.size());
}
void EncodeStringList(const tstring* strings, int64_t n, string* out) {
out->clear();
for (int i = 0; i < n; ++i) {
core::PutVarint32(out, strings[i].size());
}
for (int i = 0; i < n; ++i) {
out->append(strings[i]);
}
}
bool DecodeStringList(const string& src, tstring* strings, int64_t n) {
std::vector<uint32> sizes(n);
StringPiece reader(src);
int64_t tot = 0;
for (auto& v : sizes) {
if (!core::GetVarint32(&reader, &v)) return false;
tot += v;
}
if (tot != static_cast<int64_t>(reader.size())) {
return false;
}
tstring* data = strings;
for (int64_t i = 0; i < n; ++i, ++data) {
auto size = sizes[i];
if (size > reader.size()) {
return false;
}
data->assign(reader.data(), size);
reader.remove_prefix(size);
}
return true;
}
void CopyFromArray(string* s, const char* base, size_t bytes) {
s->assign(base, bytes);
}
class StringListEncoderImpl : public StringListEncoder {
public:
explicit StringListEncoderImpl(string* out) : out_(out) {}
~StringListEncoderImpl() override = default;
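  // Wire layout: every Append() writes a varint length to out_ immediately
  // and buffers the payload in rest_; Finalize() appends all payloads after
  // the lengths.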
void Append(const protobuf::MessageLite& m) override {
core::PutVarint32(out_, m.ByteSizeLong());
tensorflow::string serialized_message;
m.AppendToString(&serialized_message);
strings::StrAppend(&rest_, serialized_message);
}
void Append(const string& s) override {
core::PutVarint32(out_, s.length());
strings::StrAppend(&rest_, s);
}
void Finalize() override { strings::StrAppend(out_, rest_); }
private:
string* out_;
string rest_;
};
class StringListDecoderImpl : public StringListDecoder {
public:
explicit StringListDecoderImpl(const string& in) : reader_(in) {}
~StringListDecoderImpl() override = default;
bool ReadSizes(std::vector<uint32>* sizes) override {
int64_t total = 0;
for (auto& size : *sizes) {
if (!core::GetVarint32(&reader_, &size)) return false;
total += size;
}
if (total != static_cast<int64_t>(reader_.size())) {
return false;
}
return true;
}
const char* Data(uint32 size) override {
const char* data = reader_.data();
reader_.remove_prefix(size);
return data;
}
private:
StringPiece reader_;
};
std::unique_ptr<StringListEncoder> NewStringListEncoder(string* out) {
return std::unique_ptr<StringListEncoder>(new StringListEncoderImpl(out));
}
std::unique_ptr<StringListDecoder> NewStringListDecoder(const string& in) {
return std::unique_ptr<StringListDecoder>(new StringListDecoderImpl(in));
}
#if defined(TENSORFLOW_PROTOBUF_USES_CORD)
void AssignRefCounted(StringPiece src, core::RefCounted* obj, absl::Cord* out) {
obj->Ref();
*out = absl::MakeCordFromExternal(src, [obj] { obj->Unref(); });
}
void EncodeStringList(const tstring* strings, int64_t n, absl::Cord* out) {
out->Clear();
for (int i = 0; i < n; ++i) {
::strings::CordAppendVarint(strings[i].size(), out);
}
for (int i = 0; i < n; ++i) {
out->Append(strings[i]);
}
}
bool DecodeStringList(const absl::Cord& src, string* strings, int64_t n) {
std::vector<uint32> sizes(n);
CordReader reader(src);
int64_t tot = 0;
for (auto& v : sizes) {
if (!::strings::CordReaderReadVarint(&reader, &v)) return false;
tot += v;
}
if (tot != reader.Available()) {
return false;
}
string* data = strings;
for (int i = 0; i < n; ++i, ++data) {
auto size = sizes[i];
if (size > reader.Available()) {
return false;
}
gtl::STLStringResizeUninitialized(data, size);
reader.ReadN(size, gtl::string_as_array(data));
}
return true;
}
bool DecodeStringList(const absl::Cord& src, tstring* strings, int64_t n) {
std::vector<uint32> sizes(n);
CordReader reader(src);
int64_t tot = 0;
for (auto& v : sizes) {
if (!::strings::CordReaderReadVarint(&reader, &v)) return false;
tot += v;
}
if (tot != reader.Available()) {
return false;
}
tstring* data = strings;
for (int i = 0; i < n; ++i, ++data) {
auto size = sizes[i];
if (size > reader.Available()) {
return false;
}
data->resize_uninitialized(size);
reader.ReadN(size, data->data());
}
return true;
}
void CopyFromArray(absl::Cord* c, const char* base, size_t bytes) {
*c = absl::string_view(base, bytes);
}
class CordStringListEncoderImpl : public StringListEncoder {
public:
explicit CordStringListEncoderImpl(absl::Cord* out) : out_(out) {}
~CordStringListEncoderImpl() override = default;
void Append(const protobuf::MessageLite& m) override {
::strings::CordAppendVarint(m.ByteSizeLong(), out_);
m.AppendToString(&rest_);
}
void Append(const string& s) override {
::strings::CordAppendVarint(s.length(), out_);
rest_.append(s.data(), s.size());
}
void Finalize() override { out_->Append(rest_); }
private:
absl::Cord* out_;
string rest_;
};
class CordStringListDecoderImpl : public StringListDecoder {
public:
explicit CordStringListDecoderImpl(const absl::Cord& in) : reader_(in) {}
~CordStringListDecoderImpl() override = default;
bool ReadSizes(std::vector<uint32>* sizes) override {
int64_t total = 0;
for (auto& size : *sizes) {
if (!::strings::CordReaderReadVarint(&reader_, &size)) return false;
total += size;
}
if (total != static_cast<int64_t>(reader_.Available())) {
return false;
}
return true;
}
const char* Data(uint32 size) override {
tmp_.resize(size);
reader_.ReadN(size, tmp_.data());
return tmp_.data();
}
private:
CordReader reader_;
std::vector<char> tmp_;
};
std::unique_ptr<StringListEncoder> NewStringListEncoder(absl::Cord* out) {
return std::unique_ptr<StringListEncoder>(new CordStringListEncoderImpl(out));
}
std::unique_ptr<StringListDecoder> NewStringListDecoder(const absl::Cord& in) {
return std::unique_ptr<StringListDecoder>(new CordStringListDecoderImpl(in));
}
#endif
}
} | #include "tensorflow/core/distributed_runtime/tensor_coding.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
class DummyDevice : public DeviceBase {
public:
explicit DummyDevice(Env* env) : DeviceBase(env) {
attr_.set_device_type("CPU");
}
const DeviceAttributes& attributes() const override { return attr_; }
Allocator* GetAllocator(AllocatorAttributes attr) override {
return cpu_allocator();
}
private:
DeviceAttributes attr_;
};
class StringSource : public TensorResponse::Source {
public:
explicit StringSource(const string* s, int block_size)
: s_(s), stream_(nullptr), block_size_(block_size) {}
~StringSource() override { DeleteStream(); }
protobuf::io::ZeroCopyInputStream* contents() override {
DeleteStream();
stream_ = new (&space_)
protobuf::io::ArrayInputStream(s_->data(), s_->size(), block_size_);
return stream_;
}
void DeleteStream() {
if (stream_) {
stream_->~ArrayInputStream();
}
}
private:
const string* s_;
protobuf::io::ArrayInputStream* stream_;
char space_[sizeof(protobuf::io::ArrayInputStream)];
int block_size_;
};
class TensorResponseTest : public ::testing::Test {
public:
void Validate(const Tensor& src, bool is_dead, bool use_tensor_content) {
RecvTensorResponse proto;
proto.set_is_dead(is_dead);
proto.set_send_start_micros(123456);
if (use_tensor_content) {
src.AsProtoTensorContent(proto.mutable_tensor());
} else {
src.AsProtoField(proto.mutable_tensor());
}
string encoded;
proto.AppendToString(&encoded);
StringSource source(&encoded, 1024);
TensorResponse response;
DummyDevice cpu_device(Env::Default());
response.InitAlloc(&cpu_device, AllocatorAttributes());
for (int i = 0; i < 2; i++) {
Status s = response.ParseFrom(&source);
EXPECT_TRUE(s.ok());
const RecvTensorResponse& meta = response.metadata();
EXPECT_EQ(meta.is_dead(), is_dead);
EXPECT_EQ(meta.send_start_micros(), 123456);
const Tensor& result = response.tensor();
EXPECT_EQ(result.dtype(), src.dtype());
EXPECT_EQ(result.shape().DebugString(), src.shape().DebugString());
EXPECT_EQ(result.DebugString(), src.DebugString());
}
}
template <typename T>
void DoTest(DataType dt) {
gtl::InlinedVector<T, 4> v;
LOG(ERROR) << "DT: " << static_cast<int>(dt);
for (int elems = 0; elems <= 10000; elems++) {
if (elems < 100 || (elems % 1000 == 0)) {
Tensor a(dt, TensorShape({1, static_cast<int64_t>(v.size())}));
test::FillValues<T>(&a, v);
Validate(a, (elems == 0), true);
}
v.push_back(static_cast<T>(elems));
}
}
void DoTestForStrings(DataType dt) {
gtl::InlinedVector<tstring, 4> v;
LOG(ERROR) << "DT: string";
for (int elems = 0; elems <= 10000; elems++) {
if (elems < 100 || (elems % 1000 == 0)) {
Tensor a(dt, TensorShape({1, static_cast<int64_t>(v.size())}));
test::FillValues<tstring>(&a, v);
Validate(a, (elems == 0), true);
}
v.push_back(strings::StrCat("This is string ", elems));
}
}
};
TEST_F(TensorResponseTest, Simple) {
DoTest<float>(DT_FLOAT);
DoTest<double>(DT_DOUBLE);
DoTest<int32>(DT_INT32);
DoTest<uint16>(DT_UINT16);
DoTest<uint8>(DT_UINT8);
DoTest<int16>(DT_INT16);
DoTest<int8>(DT_INT8);
DoTest<complex64>(DT_COMPLEX64);
DoTest<complex128>(DT_COMPLEX128);
DoTest<int64_t>(DT_INT64);
DoTest<bool>(DT_BOOL);
DoTest<qint8>(DT_QINT8);
DoTest<quint8>(DT_QUINT8);
DoTest<qint16>(DT_QINT16);
DoTest<quint16>(DT_QUINT16);
DoTest<qint32>(DT_QINT32);
DoTest<bfloat16>(DT_BFLOAT16);
DoTest<Eigen::half>(DT_HALF);
}
TEST_F(TensorResponseTest, StringTensor) { DoTestForStrings(DT_STRING); }
string MakeFloatTensorTestCase(int num_elems) {
std::vector<int8> v(num_elems);
for (int i = 0; i < num_elems; i++) {
v[i] = i % 10;
}
Tensor src(DT_INT8, TensorShape({1, static_cast<int64_t>(v.size())}));
test::FillValues<int8>(&src, v);
RecvTensorResponse proto;
proto.set_is_dead(false);
proto.set_send_start_micros(123456);
src.AsProtoTensorContent(proto.mutable_tensor());
string encoded;
proto.AppendToString(&encoded);
return encoded;
}
static void BM_TensorResponse(::testing::benchmark::State& state) {
const int arg = state.range(0);
string encoded = MakeFloatTensorTestCase(arg);
DummyDevice cpu_device(Env::Default());
size_t bytes = 0;
for (auto i : state) {
TensorResponse response;
response.InitAlloc(&cpu_device, AllocatorAttributes());
StringSource source(&encoded, -1);
Status s = response.ParseFrom(&source);
bytes = response.tensor().TotalBytes();
}
state.SetLabel(strings::StrCat("Bytes: ", bytes));
}
BENCHMARK(BM_TensorResponse)->Arg(0)->Arg(1000)->Arg(100000);
static void BM_TensorViaTensorProto(::testing::benchmark::State& state) {
const int arg = state.range(0);
std::string encoded = MakeFloatTensorTestCase(arg);
size_t bytes = 0;
for (auto s : state) {
RecvTensorResponse r;
r.ParseFromString(encoded);
Tensor t;
CHECK(t.FromProto(r.tensor()));
bytes = t.TotalBytes();
}
state.SetLabel(strings::StrCat("Bytes: ", bytes));
}
BENCHMARK(BM_TensorViaTensorProto)->Arg(0)->Arg(1000)->Arg(100000);
} |
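// Editor's sketch of a round trip through the encoder/decoder pair above,
// assuming only the declared interface: ReadSizes() recovers the varint
// length prefixes first, then Data() yields each payload in order. The
// function name and payloads are illustrative.
#include <string>
#include <vector>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/tensor_coding.h"

void StringListRoundTripSketch() {
  std::string buf;
  auto encoder = tensorflow::port::NewStringListEncoder(&buf);
  encoder->Append(std::string("alpha"));
  encoder->Append(std::string("beta"));
  encoder->Finalize();  // buf: [varint sizes...][concatenated payloads]

  auto decoder = tensorflow::port::NewStringListDecoder(buf);
  std::vector<tensorflow::uint32> sizes(2);
  CHECK(decoder->ReadSizes(&sizes));
  const std::string alpha(decoder->Data(sizes[0]), sizes[0]);
  const std::string beta(decoder->Data(sizes[1]), sizes[1]);
  CHECK_EQ(alpha, "alpha");
  CHECK_EQ(beta, "beta");
}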
1,287 | cpp | tensorflow/tensorflow | collective_rma_distributed | tensorflow/core/distributed_runtime/collective_rma_distributed.cc | tensorflow/core/distributed_runtime/collective_rma_distributed_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_COLLECTIVE_RMA_DISTRIBUTED_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_COLLECTIVE_RMA_DISTRIBUTED_H_
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
namespace tensorflow {
class WorkerCacheInterface;
class CollectiveRemoteAccessDistributed : public CollectiveRemoteAccessLocal {
public:
CollectiveRemoteAccessDistributed(
const DeviceMgr* dev_mgr, DeviceResolverInterface* dev_resolver,
std::shared_ptr<UnboundedWorkQueue> work_queue,
WorkerCacheInterface* worker_cache, int64_t step_id, string task_name)
: CollectiveRemoteAccessLocal(dev_mgr, dev_resolver, step_id),
worker_cache_(worker_cache),
work_queue_(std::move(work_queue)),
task_name_(std::move(task_name)) {}
~CollectiveRemoteAccessDistributed() override {}
void RecvFromPeer(const string& peer_device, const string& peer_task,
bool peer_is_local, const string& key, Device* to_device,
DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
const DeviceLocality& client_locality,
int dev_to_dev_stream_index,
CancellationManager* cancellation_manager,
const StatusCallback& done) override;
void CheckPeerHealth(const string& peer_task, int64_t timeout_in_ms,
const StatusCallback& done) override;
void StartAbort(const Status& s) override;
protected:
WorkerCacheInterface* worker_cache_;
std::shared_ptr<UnboundedWorkQueue> work_queue_;
CancellationManager abortion_cancel_mgr_;
string task_name_;
};
}
#endif
#include "tensorflow/core/distributed_runtime/collective_rma_distributed.h"
#include <memory>
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/cancellable_call.h"
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/protobuf_internal.h"
#include "tensorflow/core/profiler/lib/scoped_memory_debug_annotation.h"
#include "tensorflow/core/protobuf/transport_options.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
namespace {
class RecvBufCall : public CancellableCall {
public:
RecvBufCall(int64_t step_id, const string& peer_device,
const string& peer_task, const string& key, Device* to_device,
DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
const DeviceLocality& client_locality,
const DeviceAttributes& server_attributes,
CancellationManager* cancel_mgr, WorkerCacheInterface* wc)
: CancellableCall(cancel_mgr, peer_task, wc) {
req_.set_step_id(step_id);
req_.set_buf_rendezvous_key(key);
*req_.mutable_client_locality() = client_locality;
*req_.mutable_server_locality() = server_attributes.locality();
req_.set_num_bytes(to_tensor->TotalBytes());
req_.set_buf_ptr(reinterpret_cast<int64_t>(DMAHelper::base(to_tensor)));
req_.set_src_device(peer_device);
req_.set_src_incarnation(server_attributes.incarnation());
req_.set_dst_device(to_device->name());
req_.set_request_id(GetUniqueRequestId());
}
~RecvBufCall() override {}
void IssueCall(const StatusCallback& done) override {
wi_->RecvBufAsync(&opts_, &req_, &resp_, done);
}
RecvBufRequest req_;
RecvBufResponse resp_;
};
void PopulateTensorFromExtra(const RecvBufRespExtra& extra,
Tensor* cpu_tensor) {
char* head = reinterpret_cast<char*>(DMAHelper::base(cpu_tensor));
for (const auto& tensor_content_chunk : extra.tensor_content()) {
memcpy(head, std::string(tensor_content_chunk).data(),
tensor_content_chunk.size());
head += tensor_content_chunk.size();
}
}
Status PopulateTensorFromResponse(const RecvBufResponse& response,
Tensor* cpu_tensor) {
const bool has_transport_options = response.has_transport_options();
if (!has_transport_options) return absl::OkStatus();
const int64_t total_bytes = cpu_tensor->TotalBytes();
int64_t num_bytes = 0;
RecvBufRespExtra extra;
response.transport_options().UnpackTo(&extra);
for (const auto& chunk : extra.tensor_content()) {
num_bytes += chunk.size();
}
if (num_bytes != total_bytes) {
return errors::Internal("Tensor Size Mismatch: RecvBufResponse returned ",
num_bytes,
" bytes, expected: ", cpu_tensor->TotalBytes());
}
PopulateTensorFromExtra(extra, cpu_tensor);
return absl::OkStatus();
}
}
void CollectiveRemoteAccessDistributed::RecvFromPeer(
const string& peer_device, const string& peer_task, bool peer_is_local,
const string& key, Device* to_device, DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
const DeviceLocality& client_locality, int dev_to_dev_stream_index,
CancellationManager* cancellation_manager, const StatusCallback& done) {
if (peer_is_local) {
CollectiveRemoteAccessLocal::RecvFromPeer(
peer_device, peer_task, peer_is_local, key, to_device, to_device_ctx,
to_alloc_attr, to_tensor, client_locality, dev_to_dev_stream_index,
cancellation_manager, done);
return;
}
struct State {
DeviceAttributes server_attributes;
std::unique_ptr<RecvBufCall> call;
std::unique_ptr<Tensor> cpu_tensor;
};
State* state = new State;
DeviceAttributes server_attributes;
Status s = dev_resolver_->GetDeviceAttributes(peer_device,
&state->server_attributes);
if (!s.ok()) {
delete state;
done(s);
return;
}
Tensor* dst_tensor = nullptr;
Device* cpu_dev = nullptr;
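  // Accelerator destinations are staged through a gpu-compatible host
  // tensor; the payload is copied onto the device afterwards via
  // CopyTensor::ViaDMA in the receive callback below.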
if (to_device->tensorflow_accelerator_device_info()) {
Status status = dev_mgr_->LookupDevice("CPU:0", &cpu_dev);
if (!status.ok()) {
delete state;
      done(status);
return;
}
AllocatorAttributes cpu_attr;
cpu_attr.set_gpu_compatible(true);
tsl::profiler::ScopedMemoryDebugAnnotation op_annotation(
"CollectiveRemoteAccessDistributed::RecvFromPeer"
"::recv_buf_callback",
step_id_, "dynamic", to_tensor->dtype(),
[to_tensor]() { return to_tensor->shape().DebugString(); });
state->cpu_tensor =
std::make_unique<Tensor>(cpu_dev->GetAllocator(cpu_attr),
to_tensor->dtype(), to_tensor->shape());
dst_tensor = state->cpu_tensor.get();
} else {
dst_tensor = to_tensor;
}
auto recv_buf_callback =
[this, state, to_device, to_alloc_attr, to_device_ctx, to_tensor, cpu_dev,
dev_to_dev_stream_index, dst_tensor, done](const Status& s) {
if (s.ok()) {
Status status =
PopulateTensorFromResponse(state->call->resp_, dst_tensor);
if (!status.ok()) {
done(status);
delete state;
return;
}
if (to_device->tensorflow_accelerator_device_info()) {
AllocatorAttributes cpu_attr;
cpu_attr.set_gpu_compatible(true);
CopyTensor::ViaDMA("",
                               nullptr, to_device_ctx, cpu_dev,
to_device, cpu_attr, to_alloc_attr, dst_tensor,
to_tensor, dev_to_dev_stream_index,
[this, state, done](const Status& s) {
delete state;
work_queue_->Schedule([s, done] { done(s); });
});
return;
}
}
delete state;
done(s);
};
state->call.reset(new RecvBufCall(
step_id_, peer_device, peer_task, key, to_device, to_device_ctx,
to_alloc_attr, dst_tensor, client_locality, state->server_attributes,
cancellation_manager, worker_cache_));
CancellationToken abortion_token =
abortion_cancel_mgr_.get_cancellation_token();
bool already_aborted = !abortion_cancel_mgr_.RegisterCallback(
abortion_token, [state] { state->call->Cancel(); });
if (already_aborted) {
recv_buf_callback(errors::Cancelled("collective ops already aborted"));
} else {
state->call->Start([this, abortion_token,
done = std::move(recv_buf_callback)](const Status& s) {
abortion_cancel_mgr_.DeregisterCallback(abortion_token);
done(s);
});
}
}
void CollectiveRemoteAccessDistributed::CheckPeerHealth(
const string& peer_task, int64_t timeout_in_ms,
const StatusCallback& done) {
if (peer_task == task_name_) {
done(absl::OkStatus());
return;
}
WorkerInterface* wi = worker_cache_->GetOrCreateWorker(peer_task);
if (wi == nullptr) {
done(errors::InvalidArgument(peer_task,
" not found. It's probably invalid. The "
"valid form is /job:xxx/replica:0/task:N"));
return;
}
auto opts = new CallOptions();
opts->SetTimeout(timeout_in_ms);
auto req = new GetStatusRequest();
auto resp = new GetStatusResponse();
wi->GetStatusAsync(
opts, req, resp, true,
[this, opts, req, resp, wi, peer_task, done](Status s) {
std::vector<DeviceAttributes> cached_attrs;
if (s.ok()) {
s = dev_resolver_->GetAllDeviceAttributes(peer_task, &cached_attrs);
}
if (s.ok()) {
absl::flat_hash_set<uint64> remote_incarnations;
for (const DeviceAttributes& da : resp->device_attributes()) {
remote_incarnations.insert(da.incarnation());
}
for (const DeviceAttributes& attr : cached_attrs) {
if (!remote_incarnations.contains(attr.incarnation())) {
s = errors::FailedPrecondition(
attr.name(), " with incarnation ", attr.incarnation(),
" is not available. This usually means ", peer_task,
" has restarted");
break;
}
}
} else if (absl::IsNotFound(s)) {
s = absl::OkStatus();
}
delete opts;
delete req;
delete resp;
worker_cache_->ReleaseWorker(peer_task, wi);
done(s);
});
}
void CollectiveRemoteAccessDistributed::StartAbort(const Status& s) {
CollectiveRemoteAccessLocal::StartAbort(s);
abortion_cancel_mgr_.StartCancel();
}
} | #include "tensorflow/core/distributed_runtime/collective_rma_distributed.h"
#include "google/protobuf/any.pb.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/transport_options.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
namespace {
class FakeAllocator : public Allocator {
public:
string Name() override { return "fake"; }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
return port::AlignedMalloc(num_bytes, alignment);
}
void DeallocateRaw(void* ptr) override { return port::AlignedFree(ptr); }
};
static std::unique_ptr<Device> NewDevice(const string& type, const string& name,
Allocator* allocator) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr, Allocator* allocator)
: Device(nullptr, attr), allocator_(allocator) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return allocator_; }
private:
Allocator* const allocator_;
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
attr.mutable_locality()->set_numa_node(3);
attr.set_incarnation(random::New64());
return std::make_unique<FakeDevice>(attr, allocator);
}
static int64_t kStepId = 123;
class FakeWorker : public TestWorkerInterface {
public:
FakeWorker(const string& name, DeviceMgr* dev_mgr,
DeviceResolverDistributed* dres, bool is_failed,
bool set_tensor_in_extra)
: name_(name),
device_mgr_(dev_mgr),
device_resolver_(dres),
buf_rendezvous_(kStepId, dev_mgr),
is_failed_(is_failed),
set_tensor_in_extra_(set_tensor_in_extra) {}
BufRendezvous* buf_rendezvous() { return &buf_rendezvous_; }
void GetStatusAsync(CallOptions* opts, const GetStatusRequest* request,
GetStatusResponse* response, bool fail_fast,
StatusCallback done) override {
if (is_failed_) {
done(errors::Unavailable("peer down"));
return;
}
std::vector<DeviceAttributes> dev_attr;
device_mgr_->ListDeviceAttributes(&dev_attr);
for (const auto& da : dev_attr) {
*response->add_device_attributes() = da;
}
done(absl::OkStatus());
}
void RecvBufAsync(CallOptions* opts, const RecvBufRequest* request,
RecvBufResponse* response, StatusCallback done) override {
if (is_failed_) {
done(errors::Unavailable("peer down"));
return;
}
opts->SetCancelCallback([this]() {
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(100);
buf_rendezvous_.StartAbort(errors::Internal("Cancelled"));
});
});
VLOG(2) << "ConsumeBuf key=" << request->buf_rendezvous_key()
<< " src_device=" << request->src_device()
<< " src_incarnation=" << request->src_incarnation();
buf_rendezvous_.ConsumeBuf(
request->buf_rendezvous_key(), request->src_device(),
request->src_incarnation(),
[this, opts, request, response, done](const Status& status,
BufRendezvous::Hook* h) {
Status s = status;
if (s.ok()) {
opts->ClearCancelCallback();
int64_t num_bytes = h->prod_value->TotalBytes();
if (set_tensor_in_extra_) {
RecvBufRespExtra extra;
extra.add_tensor_content(string(
reinterpret_cast<const char*>(DMAHelper::base(h->prod_value)),
num_bytes));
response->mutable_transport_options()->PackFrom(extra);
} else {
if (request->num_bytes() != num_bytes) {
s = errors::Internal("Tensor Size Mismatch.");
} else {
memcpy(reinterpret_cast<void*>(request->buf_ptr()),
DMAHelper::base(h->prod_value), num_bytes);
}
}
}
done(s);
if (h) BufRendezvous::DoneWithHook(h);
},
        nullptr);
}
private:
string name_;
DeviceMgr* device_mgr_;
DeviceResolverDistributed* device_resolver_;
BufRendezvous buf_rendezvous_;
bool is_failed_;
const bool set_tensor_in_extra_;
};
class FakeCache : public TestWorkerCache {
public:
bool GetDeviceLocalityNonBlocking(const string& device,
DeviceLocality* locality) override {
return false;
}
void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
StatusCallback done) override {
string task_name;
string dev_part;
if (!DeviceNameUtils::SplitDeviceName(device, &task_name, &dev_part)) {
done(errors::Internal("failed to parse device name"));
return;
}
auto it = workers_.find(task_name);
if (it == workers_.end()) {
done(errors::Internal("failed to find worker ", task_name));
return;
}
WorkerInterface* wi = it->second;
GetStatusRequest req;
GetStatusResponse resp;
Status status = wi->GetStatus(&req, &resp);
if (!status.ok()) {
done(status);
return;
}
for (const auto& it : resp.device_attributes()) {
if (it.name() == device) {
*locality = it.locality();
done(absl::OkStatus());
return;
}
}
done(errors::Internal("device not found: ", device));
}
};
enum TEST_PARAM_DEVICE_TYPE {
TEST_PARAM_DEVICE_TYPE_CPU = 0,
TEST_PARAM_DEVICE_TYPE_GPU,
};
enum TEST_PARAM_TENSOR_LOC {
TEST_PARAM_TENSOR_LOC_AT_BUF_PTR = 0,
TEST_PARAM_TENSOR_LOC_IN_EXTRA,
};
class CollRMADistTest
: public ::testing::TestWithParam<
std::tuple<TEST_PARAM_DEVICE_TYPE, TEST_PARAM_TENSOR_LOC>> {
protected:
CollRMADistTest()
: work_queue_(
std::make_shared<UnboundedWorkQueue>(Env::Default(), "test")) {}
~CollRMADistTest() override {
for (DeviceMgr* dm : device_mgrs_) {
delete dm;
}
for (auto it : dev_resolvers_) {
delete it.second;
}
for (FakeWorker* w : workers_) {
delete w;
}
}
void SetUp() override {
const int num_workers = 2;
const int num_devices = 1;
string device_type = "CPU";
string dev0_worker_name;
for (int w = 0; w < num_workers; ++w) {
string name = strings::StrCat("/job:worker/replica:0/task:", w);
if (w == 0) {
dev0_worker_name = name;
}
DefineWorker(name, device_type, num_devices);
}
rma_.reset(new CollectiveRemoteAccessDistributed(
device_mgrs_[0], dev_resolvers_[dev0_worker_name], work_queue_, &wc_,
kStepId, "/job:worker/replica:0/task:0"));
const int kNumElts = 8;
expected_value_ = Tensor(DT_FLOAT, {kNumElts});
to_tensor_ = Tensor(DT_FLOAT, {kNumElts});
large_response_ = Tensor(DT_FLOAT, {2 * kNumElts});
auto exp_alias = expected_value_.flat<float>();
auto to_alias = to_tensor_.flat<float>();
auto large_response_alias = large_response_.flat<float>();
for (int i = 0; i < kNumElts; ++i) {
exp_alias(i) = i;
to_alias(i) = -1;
}
for (int i = 0; i < 2 * kNumElts; ++i) {
large_response_alias(i) = -2;
}
}
void ResolveDeviceAttributes() {
for (auto& dev_resolver_item : dev_resolvers_) {
DeviceResolverDistributed* dev_resolver = dev_resolver_item.second;
for (const auto& item : dev_by_task_) {
TF_CHECK_OK(dev_resolver->UpdateDeviceAttributes(item.second));
}
}
}
void DefineWorker(const string& worker_name, const string& device_type,
int num_devices, bool is_failed = false) {
std::vector<std::unique_ptr<Device>> devices;
for (int i = 0; i < num_devices; ++i) {
devices.push_back(NewDevice(
device_type,
strings::StrCat(worker_name, "/device:", device_type, ":", i),
&fake_allocator_));
}
DeviceMgr* dev_mgr = new StaticDeviceMgr(std::move(devices));
device_mgrs_.push_back(dev_mgr);
std::vector<DeviceAttributes>* dv = &dev_by_task_[worker_name];
dv->clear();
for (auto d : dev_mgr->ListDevices()) {
dv->push_back(d->attributes());
}
DeviceResolverDistributed* dev_res = new DeviceResolverDistributed(dev_mgr);
dev_resolvers_[worker_name] = dev_res;
FakeWorker* fw =
new FakeWorker(worker_name, dev_mgr, dev_res, is_failed,
std::get<TEST_PARAM_TENSOR_LOC>(GetParam()) ==
TEST_PARAM_TENSOR_LOC_IN_EXTRA);
workers_.push_back(fw);
wc_.AddWorker(worker_name, fw);
}
void RestartWorker(const string& worker_name, const string& device_type,
int num_devices, bool is_failed = false) {
auto it = dev_resolvers_.find(worker_name);
if (it != dev_resolvers_.end()) {
delete it->second;
dev_resolvers_.erase(it);
}
DefineWorker(worker_name, device_type, num_devices, is_failed);
}
void ValidateResultTensor() {
ASSERT_EQ(expected_value_.NumElements(), to_tensor_.NumElements());
for (int i = 0; i < to_tensor_.NumElements(); ++i) {
EXPECT_FLOAT_EQ(expected_value_.flat<float>()(i),
to_tensor_.flat<float>()(i));
}
}
void ValidateResultTensorUnchanged() {
for (int i = 0; i < to_tensor_.NumElements(); ++i) {
EXPECT_FLOAT_EQ(-1, to_tensor_.flat<float>()(i));
}
}
void MaybeSetGPUDevice(Device* dst_device) {
if (std::get<TEST_PARAM_DEVICE_TYPE>(GetParam()) ==
TEST_PARAM_DEVICE_TYPE_GPU) {
dst_device->set_tensorflow_accelerator_device_info(
&accelerator_device_info_);
}
}
FakeCache wc_;
CancellationManager cm_;
std::vector<DeviceMgr*> device_mgrs_;
std::unordered_map<string, DeviceResolverDistributed*> dev_resolvers_;
std::unordered_map<string, std::vector<DeviceAttributes>> dev_by_task_;
std::shared_ptr<UnboundedWorkQueue> work_queue_;
std::vector<FakeWorker*> workers_;
std::unique_ptr<CollectiveRemoteAccessDistributed> rma_;
mutex mu_;
int num_done_ TF_GUARDED_BY(mu_);
condition_variable done_;
CallOptions opts_;
DeviceLocality device_locality_;
AllocatorAttributes alloc_attr_;
FakeAllocator fake_allocator_;
DeviceBase::AcceleratorDeviceInfo accelerator_device_info_;
Tensor expected_value_;
Tensor large_response_;
Tensor to_tensor_;
};
TEST_P(CollRMADistTest, ProdFirstOK) {
ResolveDeviceAttributes();
Notification consumer_note;
Notification producer_note;
Status consumer_status;
Status producer_status;
FakeWorker* wi = workers_[1];
const string kBufKey = "fake_buf_key";
wi->buf_rendezvous()->ProvideBuf(
      kBufKey, nullptr, nullptr, &expected_value_,
AllocatorAttributes(),
[&producer_note, &producer_status](const Status& s) {
producer_status.Update(s);
producer_note.Notify();
},
      nullptr);
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
DeviceContext* to_device_ctx = nullptr;
MaybeSetGPUDevice(dst_device);
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
false,
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
      device_locality_, 0,
      nullptr,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
consumer_note.WaitForNotification();
TF_EXPECT_OK(consumer_status);
producer_note.WaitForNotification();
TF_EXPECT_OK(producer_status);
ValidateResultTensor();
}
TEST_P(CollRMADistTest, ConsFirstOK) {
ResolveDeviceAttributes();
Notification consumer_note;
Notification producer_note;
Status consumer_status;
Status producer_status;
FakeWorker* wi = workers_[1];
const string kBufKey = "fake_buf_key";
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
MaybeSetGPUDevice(dst_device);
DeviceContext* to_device_ctx = nullptr;
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
false,
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
      device_locality_, 0,
      nullptr,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
wi->buf_rendezvous()->ProvideBuf(
      kBufKey, nullptr, nullptr, &expected_value_,
AllocatorAttributes(),
[&producer_note, &producer_status](const Status& s) {
producer_status.Update(s);
producer_note.Notify();
},
      nullptr);
consumer_note.WaitForNotification();
TF_EXPECT_OK(consumer_status);
producer_note.WaitForNotification();
TF_EXPECT_OK(producer_status);
ValidateResultTensor();
}
TEST_P(CollRMADistTest, ConsFirstAbort) {
ResolveDeviceAttributes();
Notification consumer_note;
Status consumer_status;
const string kBufKey = "fake_buf_key";
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
MaybeSetGPUDevice(dst_device);
DeviceContext* to_device_ctx = nullptr;
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
false,
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
      device_locality_, 0,
      nullptr,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
rma_->StartAbort(errors::Internal("Deliberate Failure"));
consumer_note.WaitForNotification();
EXPECT_EQ(consumer_status.message(), "Cancelled");
}
TEST_P(CollRMADistTest, ResponseTooLarge) {
ResolveDeviceAttributes();
Notification consumer_note;
Notification producer_note;
Status consumer_status;
Status producer_status;
FakeWorker* wi = workers_[1];
const string kBufKey = "fake_buf_key";
wi->buf_rendezvous()->ProvideBuf(
      kBufKey, nullptr, nullptr, &large_response_,
AllocatorAttributes(),
[&producer_note, &producer_status](const Status& s) {
producer_status.Update(s);
producer_note.Notify();
},
      nullptr);
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
DeviceContext* to_device_ctx = nullptr;
MaybeSetGPUDevice(dst_device);
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
false,
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
      device_locality_, 0,
      nullptr,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
consumer_note.WaitForNotification();
EXPECT_THAT(consumer_status.message(),
::testing::HasSubstr("Tensor Size Mismatch"));
producer_note.WaitForNotification();
TF_EXPECT_OK(producer_status);
ValidateResultTensorUnchanged();
}
TEST_P(CollRMADistTest, WorkerRestart) {
ResolveDeviceAttributes();
Notification consumer_note;
Notification producer_note;
Status consumer_status;
Status producer_status;
FakeWorker* wi = workers_[1];
const string buf_key = "fake_buf_key";
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
MaybeSetGPUDevice(dst_device);
DeviceContext* to_device_ctx = nullptr;
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
false,
buf_key, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
      device_locality_, 0,
      nullptr,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
wi->buf_rendezvous()->ProvideBuf(
      buf_key, nullptr, nullptr, &expected_value_,
AllocatorAttributes(),
[&producer_note, &producer_status](const Status& s) {
producer_status.Update(s);
producer_note.Notify();
},
      nullptr);
consumer_note.WaitForNotification();
TF_EXPECT_OK(consumer_status);
producer_note.WaitForNotification();
TF_EXPECT_OK(producer_status);
ValidateResultTensor();
RestartWorker("/job:worker/replica:0/task:1", "CPU", 1);
Notification post_restart_note;
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
false,
buf_key, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
      device_locality_, 0,
      nullptr,
[&consumer_status, &post_restart_note](const Status& s) {
consumer_status = s;
post_restart_note.Notify();
});
post_restart_note.WaitForNotification();
EXPECT_TRUE(errors::IsFailedPrecondition(consumer_status));
}
TEST_P(CollRMADistTest, CheckHealthOKWithCachedAttr) {
ResolveDeviceAttributes();
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
TF_EXPECT_OK(check_health_status);
}
TEST_P(CollRMADistTest, CheckHealthOKWithoutCachedAttr) {
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
EXPECT_TRUE(check_health_status.ok());
}
TEST_P(CollRMADistTest, CheckHealthRestarted) {
ResolveDeviceAttributes();
RestartWorker("/job:worker/replica:0/task:1", "CPU", 1);
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
EXPECT_TRUE(errors::IsFailedPrecondition(check_health_status));
}
TEST_P(CollRMADistTest, CheckHealthFailedPeer) {
ResolveDeviceAttributes();
RestartWorker("/job:worker/replica:0/task:1", "CPU", 1,
true);
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
EXPECT_TRUE(errors::IsUnavailable(check_health_status));
}
TEST_P(CollRMADistTest, CheckHealthRestartedWithDifferentDevices) {
ResolveDeviceAttributes();
RestartWorker("/job:worker/replica:0/task:1", "GPU", 1);
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
EXPECT_TRUE(errors::IsFailedPrecondition(check_health_status));
}
INSTANTIATE_TEST_SUITE_P(
TensorInBufPtrOrExtra, CollRMADistTest,
::testing::Combine(::testing::Values(TEST_PARAM_TENSOR_LOC_AT_BUF_PTR,
TEST_PARAM_TENSOR_LOC_IN_EXTRA),
::testing::Values(TEST_PARAM_DEVICE_TYPE_CPU,
TEST_PARAM_DEVICE_TYPE_GPU)));
}
} |
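// Editor's illustration (hypothetical helper, not TensorFlow API) of the
// reassembly contract enforced by PopulateTensorFromResponse above: the
// chunk sizes carried in RecvBufRespExtra must sum exactly to the
// destination tensor's byte count before the chunks are copied back-to-back;
// otherwise the receive fails with a size-mismatch error.
#include <cstring>
#include <string>
#include <vector>

bool ReassembleChunksSketch(const std::vector<std::string>& chunks, char* dst,
                            std::size_t dst_bytes) {
  std::size_t total = 0;
  for (const std::string& chunk : chunks) total += chunk.size();
  if (total != dst_bytes) return false;  // the "Tensor Size Mismatch" case
  for (const std::string& chunk : chunks) {
    std::memcpy(dst, chunk.data(), chunk.size());
    dst += chunk.size();
  }
  return true;
}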
1,288 | cpp | tensorflow/tensorflow | server_lib | tensorflow/core/data/service/server_lib.cc | tensorflow/core/distributed_runtime/server_lib_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_SERVER_LIB_H_
#define TENSORFLOW_CORE_DATA_SERVICE_SERVER_LIB_H_
#include <memory>
#include <string>
#include <vector>
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/profiler/rpc/profiler_service_impl.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
class GrpcDispatcherImpl;
class GrpcWorkerImpl;
class GrpcDataServerBase {
public:
GrpcDataServerBase(
int requested_port, const std::string& protocol,
const std::string& server_type,
std::vector<std::unique_ptr<::grpc::ServerBuilderOption>> options = {});
virtual ~GrpcDataServerBase() = default;
Status Start();
void Stop();
void Join();
int BoundPort();
virtual ServerStateExport ExportState() const = 0;
protected:
virtual void AddDataServiceToBuilder(::grpc::ServerBuilder& builder) = 0;
void AddProfilerServiceToBuilder(::grpc::ServerBuilder& builder);
virtual Status StartServiceInternal() = 0;
virtual void StopServiceInternal() {}
int bound_port() { return bound_port_; }
const int requested_port_;
const std::string protocol_;
const std::string server_type_;
private:
int bound_port_;
bool started_ = false;
bool stopped_ = false;
std::unique_ptr<::grpc::Server> server_;
std::unique_ptr<grpc::ProfilerService::Service> profiler_service_ = nullptr;
std::vector<std::unique_ptr<::grpc::ServerBuilderOption>> server_options_;
};
struct SnapshotStreamInfoWrapper {
SnapshotStreamInfoWrapper() = default;
explicit SnapshotStreamInfoWrapper(const SnapshotStreamInfo& info)
: index(info.index()), state(info.state()) {}
int64_t index;
int64_t state;
};
class DispatchGrpcDataServer : public GrpcDataServerBase {
public:
explicit DispatchGrpcDataServer(
const experimental::DispatcherConfig& config,
std::vector<std::unique_ptr<::grpc::ServerBuilderOption>> options = {});
~DispatchGrpcDataServer() override;
Status NumWorkers(int* num_workers);
size_t NumActiveIterations();
Status SnapshotStreams(const std::string& path,
std::vector<SnapshotStreamInfoWrapper>* streams);
ServerStateExport ExportState() const override;
protected:
void AddDataServiceToBuilder(::grpc::ServerBuilder& builder) override;
Status StartServiceInternal() override;
void StopServiceInternal() override;
private:
const experimental::DispatcherConfig config_;
GrpcDispatcherImpl* service_;
};
struct SnapshotTaskProgressWrapper {
SnapshotTaskProgressWrapper() = default;
explicit SnapshotTaskProgressWrapper(const SnapshotTaskProgress& progress)
: snapshot_task_base_path(progress.snapshot_task().base_path()),
snapshot_task_stream_index(progress.snapshot_task().stream_index()),
completed(progress.completed()) {}
std::string snapshot_task_base_path;
int64_t snapshot_task_stream_index;
bool completed;
};
class WorkerGrpcDataServer : public GrpcDataServerBase {
public:
explicit WorkerGrpcDataServer(
const experimental::WorkerConfig& config,
std::vector<std::unique_ptr<::grpc::ServerBuilderOption>> options = {});
~WorkerGrpcDataServer() override;
Status NumTasks(int* num_tasks);
Status SnapshotTaskProgresses(
std::vector<SnapshotTaskProgressWrapper>* snapshot_task_progresses);
ServerStateExport ExportState() const override;
protected:
void AddDataServiceToBuilder(::grpc::ServerBuilder& builder) override;
Status StartServiceInternal() override;
void StopServiceInternal() override;
private:
void MaybeStartAlternativeDataTransferServer(
std::vector<DataTransferServerInfo>& transfer_servers);
const experimental::WorkerConfig config_;
GrpcWorkerImpl* service_;
std::shared_ptr<DataTransferServer> transfer_server_;
};
Status NewDispatchServer(const experimental::DispatcherConfig& config,
std::unique_ptr<DispatchGrpcDataServer>& out_server);
Status NewWorkerServer(const experimental::WorkerConfig& config,
std::unique_ptr<WorkerGrpcDataServer>& out_server);
}
}
#endif
#include "tensorflow/core/data/service/server_lib.h"
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/data/service/grpc_dispatcher_impl.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/grpc_worker_impl.h"
#include "tensorflow/core/data/service/worker_client.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/str_util.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kPortPlaceholder[] = "%port%";
constexpr char kDataTransferPortPlaceholder[] = "%dts_port%";
}
GrpcDataServerBase::GrpcDataServerBase(
int port, const std::string& protocol, const std::string& server_type,
std::vector<std::unique_ptr<::grpc::ServerBuilderOption>> options)
: requested_port_(port),
protocol_(protocol),
server_type_(server_type),
bound_port_(port),
server_options_(std::move(options)) {}
Status GrpcDataServerBase::Start() {
if (stopped_) {
return errors::FailedPrecondition(
"Server cannot be started after it has been stopped.");
}
if (started_) {
return absl::OkStatus();
}
::grpc::ServerBuilder builder;
for (std::unique_ptr<::grpc::ServerBuilderOption>& option : server_options_) {
builder.SetOption(std::move(option));
}
server_options_.clear();
std::shared_ptr<::grpc::ServerCredentials> credentials;
TF_RETURN_IF_ERROR(
CredentialsFactory::CreateServerCredentials(protocol_, &credentials));
builder.AddListeningPort(strings::StrCat("0.0.0.0:", requested_port_),
credentials, &bound_port_);
builder.SetMaxReceiveMessageSize(-1);
AddDataServiceToBuilder(builder);
AddProfilerServiceToBuilder(builder);
server_ = builder.BuildAndStart();
if (!server_) {
return errors::Internal("Could not start gRPC server");
}
TF_RETURN_IF_ERROR(StartServiceInternal());
started_ = true;
LOG(INFO) << "Started tf.data " << server_type_
<< " running at 0.0.0.0:" << BoundPort();
return absl::OkStatus();
}
void GrpcDataServerBase::Stop() {
if (stopped_) {
return;
}
if (server_) {
StopServiceInternal();
server_->Shutdown();
LOG(INFO) << "Shut down " << server_type_ << " server running at port "
<< BoundPort();
}
stopped_ = true;
}
void GrpcDataServerBase::Join() { server_->Wait(); }
int GrpcDataServerBase::BoundPort() { return bound_port(); }
void GrpcDataServerBase::AddProfilerServiceToBuilder(
::grpc::ServerBuilder& builder) {
profiler_service_ = profiler::CreateProfilerService();
builder.RegisterService(profiler_service_.get());
}
DispatchGrpcDataServer::DispatchGrpcDataServer(
const experimental::DispatcherConfig& config,
std::vector<std::unique_ptr<::grpc::ServerBuilderOption>> options)
: GrpcDataServerBase(config.port(), config.protocol(), "DispatchServer",
std::move(options)),
config_(config) {}
DispatchGrpcDataServer::~DispatchGrpcDataServer() { delete service_; }
void DispatchGrpcDataServer::AddDataServiceToBuilder(
::grpc::ServerBuilder& builder) {
service_ = std::make_unique<GrpcDispatcherImpl>(config_, builder).release();
}
Status DispatchGrpcDataServer::StartServiceInternal() {
return service_->Start();
}
void DispatchGrpcDataServer::StopServiceInternal() { service_->Stop(); }
Status DispatchGrpcDataServer::NumWorkers(int* num_workers) {
GetWorkersRequest req;
GetWorkersResponse resp;
::grpc::ServerContext ctx;
::grpc::Status s = service_->GetWorkers(&ctx, &req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get workers", s);
}
*num_workers = resp.workers_size();
return absl::OkStatus();
}
Status DispatchGrpcDataServer::SnapshotStreams(
const std::string& path, std::vector<SnapshotStreamInfoWrapper>* streams) {
GetSnapshotStreamsRequest req;
req.set_path(path);
GetSnapshotStreamsResponse resp;
::grpc::ServerContext ctx;
::grpc::Status s = service_->GetSnapshotStreams(&ctx, &req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get snapshot streams", s);
}
for (const auto& stream : resp.streams()) {
streams->push_back(SnapshotStreamInfoWrapper(stream));
}
return absl::OkStatus();
}
size_t DispatchGrpcDataServer::NumActiveIterations() {
return service_->NumActiveIterations();
}
ServerStateExport DispatchGrpcDataServer::ExportState() const {
ServerStateExport server_state_export;
*server_state_export.mutable_dispatcher_state_export() =
service_->ExportState();
return server_state_export;
}
WorkerGrpcDataServer::WorkerGrpcDataServer(
const experimental::WorkerConfig& config,
std::vector<std::unique_ptr<::grpc::ServerBuilderOption>> options)
: GrpcDataServerBase(config.port(), config.protocol(), "WorkerServer",
std::move(options)),
config_(config) {}
WorkerGrpcDataServer::~WorkerGrpcDataServer() { delete service_; }
void WorkerGrpcDataServer::AddDataServiceToBuilder(
::grpc::ServerBuilder& builder) {
service_ = std::make_unique<GrpcWorkerImpl>(config_, builder).release();
}
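// Best-effort helper: when the worker is configured with a non-gRPC data
// transfer protocol, build and start that server and append its advertised
// DataTransferServerInfo to `transfer_servers`. Build, start, and
// compatibility-info failures are logged and swallowed, not propagated.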
void WorkerGrpcDataServer::MaybeStartAlternativeDataTransferServer(
std::vector<DataTransferServerInfo>& transfer_servers) {
if (config_.data_transfer_protocol().empty() ||
config_.data_transfer_protocol() == kGrpcTransferProtocol) {
return;
}
Status s = DataTransferServer::Build(config_.data_transfer_protocol(),
service_->get_element_getter(),
&transfer_server_);
if (!s.ok()) {
LOG(ERROR) << "failed to build " << config_.data_transfer_protocol()
<< " server for worker " << config_.worker_address() << ": "
<< s;
return;
}
s = transfer_server_->Start(config_);
if (!s.ok()) {
LOG(ERROR) << "failed to start " << config_.data_transfer_protocol()
<< " server for worker " << config_.worker_address() << ": "
<< s;
return;
}
LOG(INFO) << "Data transfer server started at 0.0.0.0:"
<< transfer_server_->Port() << " for protocol "
<< config_.data_transfer_protocol() << " for worker "
<< config_.worker_address();
DataTransferServerInfo alternative_transfer_server;
alternative_transfer_server.set_protocol(config_.data_transfer_protocol());
alternative_transfer_server.set_address(str_util::StringReplace(
    config_.data_transfer_address(), kDataTransferPortPlaceholder,
    absl::StrCat(transfer_server_->Port()), /*replace_all=*/false));
absl::StatusOr<std::string> compatibility_info =
transfer_server_->GetCompatibilityInfo();
if (!compatibility_info.ok()) {
LOG(ERROR)
<< "failed to populate compatibility information for worker server "
<< config_.worker_address() << " for protocol "
<< config_.data_transfer_protocol() << ": "
<< compatibility_info.status();
return;
}
alternative_transfer_server.set_compatibility_info(*compatibility_info);
transfer_servers.push_back(alternative_transfer_server);
}
Status WorkerGrpcDataServer::StartServiceInternal() {
std::string base_address = config_.worker_address();
if (base_address.empty()) {
base_address = absl::StrCat("localhost:", kPortPlaceholder);
}
std::string worker_address = str_util::StringReplace(
    base_address, kPortPlaceholder, absl::StrCat(bound_port()),
    /*replace_all=*/false);
DataTransferServerInfo grpc_transfer_server;
grpc_transfer_server.set_protocol(kGrpcTransferProtocol);
grpc_transfer_server.set_address(worker_address);
std::vector<DataTransferServerInfo> transfer_servers = {grpc_transfer_server};
MaybeStartAlternativeDataTransferServer(transfer_servers);
TF_RETURN_IF_ERROR(service_->Start(worker_address, transfer_servers));
return absl::OkStatus();
}
void WorkerGrpcDataServer::StopServiceInternal() { service_->Stop(); }
Status WorkerGrpcDataServer::NumTasks(int* num_tasks) {
GetWorkerTasksRequest req;
GetWorkerTasksResponse resp;
::grpc::ServerContext ctx;
::grpc::Status s = service_->GetWorkerTasks(&ctx, &req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get tasks", s);
}
*num_tasks = resp.tasks_size();
return absl::OkStatus();
}
Status WorkerGrpcDataServer::SnapshotTaskProgresses(
std::vector<SnapshotTaskProgressWrapper>* snapshot_task_progresses) {
GetSnapshotTaskProgressesRequest req;
GetSnapshotTaskProgressesResponse resp;
::grpc::ServerContext ctx;
::grpc::Status s = service_->GetSnapshotTaskProgresses(&ctx, &req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get tasks", s);
}
for (const auto& progress : resp.snapshot_task_progresses()) {
snapshot_task_progresses->push_back(SnapshotTaskProgressWrapper(progress));
}
return absl::OkStatus();
}
ServerStateExport WorkerGrpcDataServer::ExportState() const {
ServerStateExport server_state_export;
*server_state_export.mutable_worker_state_export() = service_->ExportState();
return server_state_export;
}
Status NewDispatchServer(const experimental::DispatcherConfig& config,
std::unique_ptr<DispatchGrpcDataServer>& out_server) {
out_server = std::make_unique<DispatchGrpcDataServer>(config);
return absl::OkStatus();
}
Status NewWorkerServer(const experimental::WorkerConfig& config,
std::unique_ptr<WorkerGrpcDataServer>& out_server) {
out_server = std::make_unique<WorkerGrpcDataServer>(config);
return absl::OkStatus();
}
}
} | #include "tensorflow/core/distributed_runtime/server_lib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class TestServerFactory : public ServerFactory {
public:
bool AcceptsOptions(const ServerDef& server_def) override {
return server_def.protocol() == "test_protocol";
}
Status NewServer(const ServerDef& server_def, const Options& options,
std::unique_ptr<ServerInterface>* out_server) override {
return absl::OkStatus();
}
};
TEST(ServerLibTest, NewServerFactoryAccepts) {
ServerFactory::Register("TEST_SERVER", new TestServerFactory());
ServerDef server_def;
server_def.set_protocol("test_protocol");
std::unique_ptr<ServerInterface> server;
TF_EXPECT_OK(NewServer(server_def, &server));
}
TEST(ServerLibTest, NewServerNoFactoriesAccept) {
ServerDef server_def;
server_def.set_protocol("fake_protocol");
std::unique_ptr<ServerInterface> server;
Status s = NewServer(server_def, &server);
ASSERT_NE(s, absl::OkStatus());
EXPECT_TRUE(absl::StrContains(
s.message(), "No server factory registered for the given ServerDef"));
EXPECT_TRUE(
absl::StrContains(s.message(), "The available server factories are: ["));
}
} |
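A hedged sketch of how the pieces defined above fit together for an in-process tf.data service: create a dispatcher on an OS-assigned port, then point a worker at the port it actually bound. The proto setters used here (set_port, set_protocol, set_dispatcher_address) are assumed counterparts of the port()/protocol() accessors in server_lib.cc; treat the field names as illustrative rather than authoritative.

#include <memory>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/server_lib.h"
#include "tensorflow/core/platform/errors.h"

tensorflow::Status StartInProcessDataService(
    std::unique_ptr<tensorflow::data::DispatchGrpcDataServer>& dispatcher,
    std::unique_ptr<tensorflow::data::WorkerGrpcDataServer>& worker) {
  // Dispatcher on an OS-assigned port (port 0 => pick any free port).
  tensorflow::data::experimental::DispatcherConfig dispatcher_config;
  dispatcher_config.set_port(0);
  dispatcher_config.set_protocol("grpc");
  TF_RETURN_IF_ERROR(
      tensorflow::data::NewDispatchServer(dispatcher_config, dispatcher));
  TF_RETURN_IF_ERROR(dispatcher->Start());

  // Worker pointed at the port the dispatcher actually bound.
  tensorflow::data::experimental::WorkerConfig worker_config;
  worker_config.set_port(0);
  worker_config.set_protocol("grpc");
  worker_config.set_dispatcher_address(
      absl::StrCat("localhost:", dispatcher->BoundPort()));
  TF_RETURN_IF_ERROR(
      tensorflow::data::NewWorkerServer(worker_config, worker));
  return worker->Start();
}

After this, Join() blocks until shutdown and Stop() tears each server down, per the lifecycle methods on GrpcDataServerBase above.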
1,289 | cpp | tensorflow/tensorflow | cluster_function_library_runtime | tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.cc | tensorflow/core/distributed_runtime/cluster_function_library_runtime_test.cc |
#ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_CLUSTER_FUNCTION_LIBRARY_RUNTIME_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_CLUSTER_FUNCTION_LIBRARY_RUNTIME_H_
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/protobuf/remote_tensor_handle.pb.h"
namespace tensorflow {
class WorkerSession;
namespace eager {
class EagerClusterFunctionLibraryRuntime
: public DistributedFunctionLibraryRuntime {
public:
EagerClusterFunctionLibraryRuntime(const uint64 context_id, EagerContext* ctx,
DeviceMgr* remote_device_mgr)
: context_id_(context_id),
ctx_(ctx),
remote_device_mgr_(remote_device_mgr) {}
~EagerClusterFunctionLibraryRuntime() override {}
void Instantiate(const string& function_name,
const FunctionLibraryDefinition& lib_def, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::LocalHandle* handle,
FunctionLibraryRuntime::DoneCallback done) override;
void Run(const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::LocalHandle handle,
absl::Span<const Tensor> args, std::vector<Tensor>* rets,
FunctionLibraryRuntime::DoneCallback done) override;
void Run(const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::LocalHandle handle,
absl::Span<const FunctionArg> args, std::vector<FunctionRet>* rets,
FunctionLibraryRuntime::DoneCallback done) override;
void CleanUp(uint64 step_id, FunctionLibraryRuntime::LocalHandle handle,
FunctionLibraryRuntime::DoneCallback done) override;
DeviceMgr* remote_device_mgr() const override { return remote_device_mgr_; }
private:
const uint64 context_id_;
EagerContext* ctx_;
DeviceMgr* remote_device_mgr_;
struct FunctionData {
const string target;
const absl::optional<std::vector<int>> ret_indices;
core::RefCountPtr<EagerClient> eager_client;
std::unique_ptr<EagerOperation> op;
FunctionData(const string& target,
const absl::optional<std::vector<int>>& ret_indices,
EagerClient* eager_client, std::unique_ptr<EagerOperation> op)
: target(target),
ret_indices(ret_indices),
eager_client(core::RefCountPtr<EagerClient>(eager_client)),
op(std::move(op)) {
eager_client->Ref();
}
};
mutable mutex mu_;
std::vector<FunctionData> function_data_ TF_GUARDED_BY(mu_);
};
DistributedFunctionLibraryRuntime* CreateClusterFLR(
const uint64 context_id, EagerContext* ctx, WorkerSession* worker_session);
}
}
#endif
#include "tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.h"
#include <map>
#include <memory>
#include <utility>
#include <variant>
#include <vector>
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/eager/eager_client.h"
#include "tensorflow/core/distributed_runtime/worker_session.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_def_util.h"
namespace tensorflow {
namespace eager {
namespace {
void StripDefaultAttributesInRegisterFunctionOp(
RegisterFunctionOp* register_function) {
StripDefaultAttributes(
*OpRegistry::Global(),
register_function->mutable_function_def()->mutable_node_def());
for (auto& function :
*register_function->mutable_library()->mutable_function()) {
StripDefaultAttributes(*OpRegistry::Global(), function.mutable_node_def());
}
}
}
void EagerClusterFunctionLibraryRuntime::Instantiate(
const string& function_name, const FunctionLibraryDefinition& lib_def,
AttrSlice attrs, const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::LocalHandle* handle,
FunctionLibraryRuntime::DoneCallback done) {
auto target = options.target;
auto released_op = std::make_unique<EagerOperation>(ctx_);
Status s =
released_op->Reset(function_name.c_str(), target.c_str(), true, nullptr);
if (!s.ok()) {
done(s);
return;
}
if (!released_op->is_function()) {
done(errors::Internal(function_name, " is not a function."));
return;
}
VLOG(1) << "CFLR::Instantiate: " << function_name << " on " << target
<< " (this: " << this << ")";
core::RefCountPtr<eager::EagerClient> eager_client;
s = ctx_->GetClient(target, &eager_client);
if (!s.ok()) {
done(s);
return;
}
if (eager_client == nullptr) {
done(errors::InvalidArgument("Could not find eager client for target: ",
target));
return;
}
const FunctionLibraryDefinition& func_lib_def =
options.lib_def ? *options.lib_def : lib_def;
auto request = std::make_shared<EnqueueRequest>();
auto response = std::make_shared<EnqueueResponse>();
request->set_context_id(context_id_);
RegisterFunctionOp* register_function =
request->add_queue()->mutable_register_function();
*register_function->mutable_function_def() =
*func_lib_def.Find(function_name);
register_function->set_is_component_function(true);
*register_function->mutable_library() =
func_lib_def.ReachableDefinitions(register_function->function_def())
.ToProto();
StripDefaultAttributesInRegisterFunctionOp(register_function);
const absl::optional<std::vector<int>>& ret_indices = options.ret_indices;
eager_client->EnqueueAsync(
nullptr, request.get(), response.get(),
[this, request, response, handle, released_op = released_op.release(),
target, ret_indices, eager_client = eager_client.get(),
done](const Status& s) {
{
mutex_lock l(mu_);
*handle = function_data_.size();
function_data_.emplace_back(target, ret_indices, eager_client,
absl::WrapUnique(released_op));
}
done(s);
});
}
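// Adapter overload: wraps plain Tensors as FunctionArgs, delegates to the
// FunctionArg-based Run below, then unwraps each FunctionRet back into a
// Tensor, turning a shape-only result into an Internal error.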
void EagerClusterFunctionLibraryRuntime::Run(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::LocalHandle handle, absl::Span<const Tensor> args,
std::vector<Tensor>* rets, FunctionLibraryRuntime::DoneCallback done) {
std::vector<FunctionArg> function_args;
for (const auto& tensor : args) {
function_args.push_back(tensor);
}
std::vector<FunctionRet>* function_rets = new std::vector<FunctionRet>;
Run(opts, handle, function_args, function_rets,
[rets, function_rets, done = std::move(done)](const Status& s) {
Status status = s;
if (status.ok()) {
for (const auto& t : *function_rets) {
if (t.index() == 0) {
rets->push_back(std::get<Tensor>(t));
} else {
status.Update(
errors::Internal("Expect a Tensor as a remote function "
"output but got a TensorShape."));
break;
}
}
}
delete function_rets;
done(status);
});
}
void EagerClusterFunctionLibraryRuntime::Run(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::LocalHandle handle,
absl::Span<const FunctionArg> args, std::vector<FunctionRet>* rets,
FunctionLibraryRuntime::DoneCallback done) {
FunctionData* function_data = nullptr;
{
mutex_lock l(mu_);
DCHECK_LE(handle, function_data_.size());
function_data = &function_data_[handle];
}
EagerClient* eager_client = function_data->eager_client.get();
if (eager_client == nullptr) {
done(errors::Internal("Could not find eager client"));
return;
}
EagerOperation* op = function_data->op.get();
if (!op->Inputs().empty()) {
done(errors::Internal("Inputs should not be set during instantiation."));
return;
}
auto request = std::make_shared<RunComponentFunctionRequest>();
auto response = std::make_shared<RunComponentFunctionResponse>();
request->set_context_id(context_id_);
eager::Operation* remote_op = request->mutable_operation();
if (function_data->ret_indices.has_value()) {
for (const int ret_index : function_data->ret_indices.value()) {
request->add_output_num(ret_index);
}
}
for (const auto& arg : args) {
if (arg.index() == 0) {
std::get<Tensor>(arg).AsProtoTensorContent(
remote_op->add_op_inputs()->mutable_tensor());
} else {
remote_op->add_op_inputs()->mutable_remote_handle()->Swap(
std::get<RemoteTensorHandle*>(arg));
}
}
if (opts.op_id.has_value()) {
remote_op->set_id(opts.op_id.value());
} else {
remote_op->set_id(kInvalidOpId);
}
remote_op->set_is_function(true);
remote_op->set_is_component_function(true);
remote_op->set_func_step_id(opts.step_id);
remote_op->set_name(op->Name());
op->Attrs().FillAttrValueMap(remote_op->mutable_attrs());
remote_op->set_device(function_data->target);
CancellationManager* cm = opts.cancellation_manager;
CancellationToken token = 0;
auto call_opts = std::make_shared<CallOptions>();
call_opts->SetTimeout(
ctx_->session_options().config.operation_timeout_in_ms());
if (cm != nullptr) {
token = cm->get_cancellation_token();
const bool already_cancelled = !cm->RegisterCallback(
token,
[call_opts, request, response, done]() { call_opts->StartCancel(); });
if (already_cancelled) {
done(errors::Cancelled("EagerClusterFunctionLibraryRuntime::Run"));
return;
}
}
eager_client->RunComponentFunctionAsync(
call_opts.get(), request.get(), response.get(),
[request, response, rets, call_opts, cm, token,
done = std::move(done)](const Status& s) {
if (cm != nullptr) {
cm->TryDeregisterCallback(token);
}
if (!s.ok()) {
done(s);
return;
}
if (!response->shape().empty() && !response->tensor().empty()) {
done(errors::Internal(
"Both shape and tensor are specified in the same response"));
return;
}
for (const auto& shape : response->shape()) {
rets->push_back(shape);
}
for (const auto& tensor_proto : response->tensor()) {
Tensor t;
if (t.FromProto(tensor_proto)) {
rets->push_back(std::move(t));
} else {
done(errors::Internal("Could not convert tensor proto: ",
tensor_proto.DebugString()));
return;
}
}
done(absl::OkStatus());
});
}
void EagerClusterFunctionLibraryRuntime::CleanUp(
uint64 step_id, FunctionLibraryRuntime::LocalHandle handle,
FunctionLibraryRuntime::DoneCallback done) {
FunctionData* function_data = nullptr;
{
mutex_lock l(mu_);
DCHECK_LE(handle, function_data_.size());
function_data = &function_data_[handle];
}
EagerClient* eager_client = function_data->eager_client.get();
if (eager_client == nullptr) {
done(errors::Internal("Could not find eager client"));
return;
}
auto request = std::make_shared<EnqueueRequest>();
auto response = std::make_shared<EnqueueResponse>();
request->set_context_id(context_id_);
CleanupFunctionOp* cleanup_function =
request->add_queue()->mutable_cleanup_function();
cleanup_function->set_step_id(step_id);
eager_client->EnqueueAsync(
nullptr, request.get(), response.get(),
[request, response, done](const Status& status) { done(status); });
}
DistributedFunctionLibraryRuntime* CreateClusterFLR(
const uint64 context_id, EagerContext* ctx, WorkerSession* worker_session) {
return new EagerClusterFunctionLibraryRuntime(
context_id, ctx, worker_session->remote_device_mgr());
}
}
}
|
#include "tensorflow/core/distributed_runtime/cluster_function_library_runtime.h"
#include <map>
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_testlib.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_session.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
class ClusterFunctionLibraryRuntimeTest : public ::testing::Test {
public:
ClusterFunctionLibraryRuntimeTest() {
SessionOptions options;
TF_CHECK_OK(test::TestCluster::MakeTestCluster(
test::TestClusterConfig().Options(options).Jobs(
{test::TestJob{"localhost", 2}}),
&cluster_));
GrpcChannelSpec spec;
std::map<int, string> host_ports;
int i = 0;
for (const auto& target : cluster_->targets("localhost")) {
host_ports[i++] = target;
}
TF_CHECK_OK(spec.AddHostPortsJob("localhost", host_ports));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
grpc_worker_env_.reset(CreateGrpcWorkerEnv());
std::shared_ptr<GrpcChannelCache> channel_cache(
NewGrpcChannelCache(spec, channel_func));
std::unique_ptr<WorkerCacheInterface> worker_cache(
NewGrpcWorkerCache(channel_cache, grpc_worker_env_.get()));
worker_session_.reset(new WorkerSession(
"cluster_test_session", "/job:localhost/replica:0/task:0",
std::move(worker_cache), std::unique_ptr<DeviceMgr>(),
std::unique_ptr<GraphMgr>(), nullptr,
[](WorkerSession* worker_session, bool called,
DeviceMgr* remote_device_mgr) { return nullptr; }));
cluster_flr_.reset(new ClusterFunctionLibraryRuntime(
    worker_session_.get(), /*create_worker_session_called=*/true,
    /*remote_device_mgr=*/nullptr));
}
Status ConstructFunctionGraphHelper(
const OpDef& sig, test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
const FunctionLibraryDefinition& lib_def, GraphDef* g,
std::vector<string>* send_keys, std::vector<string>* recv_keys) {
return ClusterFunctionLibraryRuntime::ConstructFunctionGraph(
sig, attrs, options, lib_def, g, send_keys, recv_keys);
}
void Instantiate(const string& function_name,
const FunctionLibraryDefinition& lib_def,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::LocalHandle* local_handle,
FunctionLibraryRuntime::DoneCallback done) {
cluster_flr_->Instantiate(function_name, lib_def, attrs, options,
local_handle, done);
}
Status InstantiateAndRun(
const string& function_name, const FunctionLibraryDefinition& lib_def,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
const std::vector<Tensor>& args, std::vector<Tensor*> rets) {
FunctionLibraryRuntime::LocalHandle handle;
Status status;
Notification instantiate_done;
cluster_flr_->Instantiate(function_name, lib_def, attrs, options, &handle,
[&status, &instantiate_done](const Status& s) {
status = s;
instantiate_done.Notify();
});
instantiate_done.WaitForNotification();
if (!status.ok()) {
return status;
}
Notification done;
FunctionLibraryRuntime::Options opts;
std::vector<Tensor> out;
cluster_flr_->Run(opts, handle, args, &out,
[&status, &done](const Status& s) {
status = s;
done.Notify();
});
done.WaitForNotification();
if (!status.ok()) {
return status;
}
CHECK_EQ(rets.size(), out.size());
for (size_t i = 0; i < rets.size(); ++i) {
*rets[i] = out[i];
}
return absl::OkStatus();
}
protected:
std::unique_ptr<test::TestCluster> cluster_;
std::unique_ptr<WorkerSession> worker_session_;
std::unique_ptr<ClusterFunctionLibraryRuntime> cluster_flr_;
std::unique_ptr<GrpcWorkerEnv> grpc_worker_env_;
};
TEST_F(ClusterFunctionLibraryRuntimeTest, ConstructFunctionGraph) {
GraphDef actual;
std::vector<string> send_keys, recv_keys;
FunctionDefLibrary proto;
*(proto.add_function()) = test::function::Swap();
FunctionLibraryDefinition lib_def(OpRegistry::Global(), proto);
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/device:CPU:0";
TF_CHECK_OK(ConstructFunctionGraphHelper(
test::function::Swap().signature(), {{"T", DT_FLOAT}}, instantiate_opts,
lib_def, &actual, &send_keys, &recv_keys));
GraphDef expected;
protobuf::TextFormat::ParseFromString(R"(
node {
name: "_recv_i0_0"
op: "_Recv"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "client_terminated"
value {
b: true
}
}
attr {
key: "recv_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 1
}
}
attr {
key: "tensor_name"
value {
s: "i0"
}
}
attr {
key: "tensor_type"
value {
type: DT_FLOAT
}
}
}
node {
name: "_recv_i1_1"
op: "_Recv"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "client_terminated"
value {
b: true
}
}
attr {
key: "recv_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 1
}
}
attr {
key: "tensor_name"
value {
s: "i1"
}
}
attr {
key: "tensor_type"
value {
type: DT_FLOAT
}
}
}
node {
name: "Func/Swap/input/_0"
op: "Identity"
input: "_recv_i0_0"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Func/Swap/input/_1"
op: "Identity"
input: "_recv_i1_1"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Swap/o0"
op: "Identity"
input: "Func/Swap/input/_1"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Swap/o1"
op: "Identity"
input: "Func/Swap/input/_0"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Func/Swap/output/_2"
op: "Identity"
input: "Swap/o0"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Func/Swap/output/_3"
op: "Identity"
input: "Swap/o1"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "_send_o0_0"
op: "_Send"
input: "Func/Swap/output/_2"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "client_terminated"
value {
b: true
}
}
attr {
key: "recv_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 1
}
}
attr {
key: "tensor_name"
value {
s: "o0"
}
}
}
node {
name: "_send_o1_1"
op: "_Send"
input: "Func/Swap/output/_3"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "client_terminated"
value {
b: true
}
}
attr {
key: "recv_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 1
}
}
attr {
key: "tensor_name"
value {
s: "o1"
}
}
}
)",
&expected);
TF_EXPECT_GRAPH_EQ(expected, actual);
}
TEST_F(ClusterFunctionLibraryRuntimeTest, DISABLED_InstantiateAndRun) {
FunctionDefLibrary proto;
*(proto.add_function()) = test::function::XTimesTwoInt32();
FunctionLibraryDefinition lib_def(OpRegistry::Global(), proto);
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:localhost/replica:0/task:1/cpu:0";
Tensor y;
auto x = test::AsTensor<int32>({1, 2, 3, 4});
TF_EXPECT_OK(InstantiateAndRun("XTimesTwoInt32", lib_def, {},
instantiate_opts, {x}, {&y}));
test::ExpectTensorEqual<int32>(y, test::AsTensor<int32>({2, 4, 6, 8}));
}
TEST_F(ClusterFunctionLibraryRuntimeTest,
DISABLED_InstantiateAndRunAttrSubstitution) {
FunctionDefLibrary proto;
*(proto.add_function()) = test::function::Swap();
FunctionLibraryDefinition lib_def(OpRegistry::Global(), proto);
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:localhost/replica:0/task:1/cpu:0";
Tensor y1, y2;
auto x1 = test::AsTensor<float>({1, 2, 3, 4});
auto x2 = test::AsTensor<float>({4, 3, 2, 1});
TF_EXPECT_OK(InstantiateAndRun("Swap", lib_def, {{"T", DT_FLOAT}},
instantiate_opts, {x1, x2}, {&y1, &y2}));
test::ExpectTensorEqual<float>(y1, test::AsTensor<float>({4, 3, 2, 1}));
test::ExpectTensorEqual<float>(y2, test::AsTensor<float>({1, 2, 3, 4}));
}
} |
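The two Run overloads above dispatch on std::variant alternatives by index: index 0 is the Tensor payload, anything else is the shape-only case. Below is a minimal self-contained sketch of that dispatch pattern with hypothetical stand-in types (std::string for the payload alternative, int for the shape alternative).

#include <iostream>
#include <string>
#include <variant>
#include <vector>

// Stand-in for FunctionRet: alternative 0 is the payload, 1 is shape-only.
using Ret = std::variant<std::string, int>;

// Mirrors the index()-based unwrapping in the Tensor-based Run above.
void CollectPayloads(const std::vector<Ret>& rets,
                     std::vector<std::string>* out) {
  for (const Ret& r : rets) {
    if (r.index() == 0) {
      out->push_back(std::get<std::string>(r));  // Payload case.
    } else {
      // Shape-only case; Run above reports this as an Internal error.
      std::cerr << "unexpected shape-only result: " << std::get<int>(r)
                << "\n";
    }
  }
}

int main() {
  std::vector<Ret> rets = {std::string("a"), 3, std::string("b")};
  std::vector<std::string> out;
  CollectPayloads(rets, &out);  // out == {"a", "b"}; one line on stderr.
}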
1,290 | cpp | tensorflow/tensorflow | remote_device | tensorflow/core/distributed_runtime/remote_device.cc | tensorflow/core/distributed_runtime/remote_device_test.cc |
#ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_REMOTE_DEVICE_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_REMOTE_DEVICE_H_
#include <functional>
#include <string>
#include <vector>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tsl {
class Env;
}
namespace tensorflow {
using Env = tsl::Env;
class DeviceAttributes;
class Device;
class WorkerCacheInterface;
typedef std::function<Status(StringPiece name, Device** device)>
LookupLocalDevice;
void AsRemoteDevices(
Env* env,
const protobuf::RepeatedPtrField<DeviceAttributes>& device_attributes,
LookupLocalDevice lookup_local_device,
std::vector<std::unique_ptr<Device>>* remote_devices);
typedef std::function<void(const Status&, std::vector<Device*>*)>
NewRemoteDevicesDone;
void NewRemoteDevices(Env* env, WorkerCacheInterface* worker_cache,
const string& worker_name, NewRemoteDevicesDone done);
std::unique_ptr<Device> NewRemoteDevice(Env* env,
DeviceAttributes device_attribute);
}
#endif
#include "tensorflow/core/distributed_runtime/remote_device.h"
#include <stdlib.h>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
class RemoteDevice : public Device {
public:
RemoteDevice(Env* env, const DeviceAttributes& da)
: Device(env, da),
local_dev_name_(DeviceNameUtils::LocalName(da.name())) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; }
ResourceMgr* resource_manager() override {
LOG(FATAL) << "Accessing the resource manager of a remote device is not "
<< "supported.";
std::abort();
}
bool IsLocal() const override { return false; }
bool IsRemoteCallAllowed() const override { return true; }
private:
const string local_dev_name_;
RemoteDevice(const RemoteDevice&) = delete;
void operator=(const RemoteDevice&) = delete;
};
void AsRemoteDevices(
Env* env,
const protobuf::RepeatedPtrField<DeviceAttributes>& device_attributes,
LookupLocalDevice lookup_local_device,
std::vector<std::unique_ptr<Device>>* remote_devices) {
for (const auto& da : device_attributes) {
Device* local_device;
if (lookup_local_device != nullptr &&
lookup_local_device(da.name(), &local_device).ok()) {
remote_devices->emplace_back(RenamedDevice::NewRenamedDevice(
    local_device->name(), local_device, /*owns_underlying=*/false,
    /*isolate_session_state=*/false));
} else {
auto d = new RemoteDevice(env, da);
remote_devices->emplace_back(d);
}
}
}
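// Asynchronously queries the remote worker's devices via GetStatusAsync.
// Device names that already match the worker's job/replica/task are kept
// as-is; all others are rewritten onto the worker's job/replica/task before
// the RemoteDevice wrappers are handed to `done`.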
void NewRemoteDevices(Env* env, WorkerCacheInterface* worker_cache,
const string& worker_name, NewRemoteDevicesDone done) {
WorkerInterface* wi = worker_cache->GetOrCreateWorker(worker_name);
if (wi == nullptr) {
std::vector<Device*> empty;
done(errors::NotFound("Device ", worker_name, " is not found."), &empty);
return;
}
struct Call {
GetStatusRequest req;
GetStatusResponse resp;
};
Call* call = new Call;
auto cb = [env, worker_cache, worker_name, done, wi,
call](const Status& status) {
Status s = status;
std::vector<Device*> remote_devices;
auto cleanup = gtl::MakeCleanup(
[&worker_cache, &worker_name, &wi, &done, &remote_devices, &s, call] {
worker_cache->ReleaseWorker(worker_name, wi);
done(s, &remote_devices);
delete call;
});
if (!s.ok()) {
return;
}
DeviceNameUtils::ParsedName worker_name_parsed;
if (!DeviceNameUtils::ParseFullName(worker_name, &worker_name_parsed) ||
!worker_name_parsed.has_job || !worker_name_parsed.has_replica ||
!worker_name_parsed.has_task) {
s = errors::InvalidArgument("Could not parse worker name: ", worker_name);
LOG(WARNING) << s;
return;
}
remote_devices.reserve(call->resp.device_attributes_size());
for (const DeviceAttributes& da : call->resp.device_attributes()) {
DeviceNameUtils::ParsedName device_name_parsed;
CHECK(DeviceNameUtils::ParseFullName(da.name(), &device_name_parsed))
<< "Device attribute name '" << da.name() << "' could not be "
<< "parsed. Device Attribute: " << da.DebugString();
if (device_name_parsed.job == worker_name_parsed.job &&
device_name_parsed.replica == worker_name_parsed.replica &&
device_name_parsed.task == worker_name_parsed.task) {
auto d = new RemoteDevice(env, da);
remote_devices.push_back(d);
} else {
DeviceAttributes da_rewritten = da;
da_rewritten.set_name(DeviceNameUtils::FullName(
worker_name_parsed.job, worker_name_parsed.replica,
worker_name_parsed.task, device_name_parsed.type,
device_name_parsed.id));
auto d = new RemoteDevice(env, da_rewritten);
if (getenv("TPU_NO_POPULATE_DEVICE_LIST_FROM_CLUSTER_SPEC") !=
nullptr) {
if (worker_name_parsed.job == "worker" ||
device_name_parsed.type.find("TPU") == std::string::npos) {
remote_devices.push_back(d);
}
} else {
remote_devices.push_back(d);
}
}
}
};
wi->GetStatusAsync(nullptr, &call->req, &call->resp, /*fail_fast=*/false, cb);
}
std::unique_ptr<Device> NewRemoteDevice(Env* env,
DeviceAttributes device_attribute) {
return std::make_unique<RemoteDevice>(env, device_attribute);
}
}
|
#include "tensorflow/core/distributed_runtime/remote_device.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_testlib.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
const char* const kSession = "remote_session";
class RemoteDeviceTest : public ::testing::Test {
protected:
string remote_name_;
std::unique_ptr<WorkerCacheInterface> worker_cache_;
WorkerInterface* wi_;
std::vector<Device*> devices_;
std::unique_ptr<test::TestCluster> cluster_;
std::unique_ptr<GrpcWorkerEnv> grpc_worker_env_;
RemoteDeviceTest() {
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 2;
TF_CHECK_OK(test::TestCluster::MakeTestCluster(options, 1, &cluster_));
const string& hostport = cluster_->targets()[0];
GrpcChannelSpec spec;
TF_CHECK_OK(spec.AddHostPortsJob("localhost", {hostport}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
std::shared_ptr<GrpcChannelCache> channel_cache(
NewGrpcChannelCache(spec, channel_func));
grpc_worker_env_.reset(CreateGrpcWorkerEnv());
worker_cache_.reset(
NewGrpcWorkerCache(channel_cache, grpc_worker_env_.get()));
remote_name_ = "/job:localhost/replica:0/task:0";
wi_ = worker_cache_->GetOrCreateWorker(remote_name_);
}
~RemoteDeviceTest() override {
worker_cache_->ReleaseWorker(remote_name_, wi_);
}
void SetUp() override {
Notification n;
NewRemoteDevices(Env::Default(), worker_cache_.get(), remote_name_,
[&n, this](const Status& s, std::vector<Device*>* found) {
TF_CHECK_OK(s);
devices_ = *found;
n.Notify();
});
n.WaitForNotification();
EXPECT_EQ(devices_.size(), 2);
std::sort(devices_.begin(), devices_.end(), [](Device* a, Device* b) {
return a->name().compare(b->name()) < 0;
});
}
void TearDown() override {
for (auto d : devices_) delete d;
}
};
TEST_F(RemoteDeviceTest, GetStatus) {
EXPECT_EQ(devices_[0]->name(),
strings::StrCat(remote_name_, "/device:CPU:0"));
EXPECT_EQ(devices_[0]->attributes().device_type(),
DeviceType(DEVICE_CPU).type());
EXPECT_EQ(devices_[0]->attributes().memory_limit(), 256 << 20);
EXPECT_EQ(devices_[1]->name(),
strings::StrCat(remote_name_, "/device:CPU:1"));
EXPECT_EQ(devices_[1]->attributes().memory_limit(), 256 << 20);
}
} |
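A hedged standalone sketch of the name rewrite NewRemoteDevices performs, using the same DeviceNameUtils::ParseFullName/FullName calls seen above; RehomeDeviceName is an illustrative helper, not part of the library.

#include <string>
#include "tensorflow/core/util/device_name_utils.h"

// Keeps the device's type and id but re-homes it under the worker's
// job/replica/task. Returns an empty string if either name fails to parse.
std::string RehomeDeviceName(const std::string& worker_name,
                             const std::string& device_name) {
  tensorflow::DeviceNameUtils::ParsedName worker, device;
  if (!tensorflow::DeviceNameUtils::ParseFullName(worker_name, &worker) ||
      !tensorflow::DeviceNameUtils::ParseFullName(device_name, &device)) {
    return "";
  }
  return tensorflow::DeviceNameUtils::FullName(
      worker.job, worker.replica, worker.task, device.type, device.id);
}

// RehomeDeviceName("/job:localhost/replica:0/task:0",
//                  "/job:a/replica:1/task:2/device:CPU:0")
//   yields "/job:localhost/replica:0/task:0/device:CPU:0".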
1,291 | cpp | tensorflow/tensorflow | session_mgr | tensorflow/core/distributed_runtime/session_mgr.cc | tensorflow/core/distributed_runtime/session_mgr_test.cc |
#ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_SESSION_MGR_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_SESSION_MGR_H_
#include <functional>
#include <string>
#include "xla/tsl/distributed_runtime/coordination/coordination_service.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_rpc_handler.h"
#include "tensorflow/core/distributed_runtime/worker_session.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/protobuf/tensorflow_server.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
class WorkerCacheInterface;
struct WorkerEnv;
class SessionMgr {
public:
typedef std::function<Status(const ServerDef&, WorkerCacheInterface**)>
WorkerCacheFactory;
explicit SessionMgr(
WorkerEnv* worker_env, const std::string& default_worker_name,
std::unique_ptr<WorkerCacheInterface> default_worker_cache,
WorkerCacheFactory worker_cache_factory,
tsl::CoordinationServiceRpcHandler* coordination_handler);
~SessionMgr() {}
Status CreateSession(
const std::string& session, const ServerDef& server_def,
bool isolate_session_state,
StatusCallback coordination_error_callback = [](Status s) {
LOG(ERROR) << "Coordination agent is set to error: " << s;
});
Status CreateSession(
const std::string& session, const ServerDef& server_def,
const protobuf::RepeatedPtrField<DeviceAttributes>& device_attributes,
bool isolate_session_state);
Status CreateSession(
const std::string& session, const ServerDef& server_def,
const protobuf::RepeatedPtrField<DeviceAttributes>& device_attributes,
bool isolate_session_state, std::string master_task,
int64_t master_incarnation,
StatusCallback coordination_error_callback = [](Status s) {
LOG(ERROR) << "Coordination agent is set to error: " << s;
});
void ResetDefaultWorkerCache(WorkerCacheInterface* worker_cache);
Status UpdateSession(const std::string& session, const ServerDef& server_def,
const protobuf::RepeatedPtrField<DeviceAttributes>&
cluster_device_attributes);
Status WorkerSessionForSession(const std::string& session_handle,
std::shared_ptr<WorkerSession>* out_session);
std::shared_ptr<WorkerSession> LegacySession();
Status DeleteSession(const std::string& session);
Status DeleteAllSessions();
tsl::CoordinationServiceAgent* GetCoordinationServiceAgent();
static std::string WorkerNameFromServerDef(const ServerDef& server_def);
void SetLogging(bool active);
void RetrieveLogs(int64_t step_id, LoggingResponse* response);
void ClearLogs();
void TeardownCoordinationServiceAgent();
void TeardownCoordinationService();
private:
WorkerEnv* const worker_env_;
std::unique_ptr<WorkerCacheInterface> default_worker_cache_;
std::shared_ptr<WorkerSession> legacy_session_;
std::unique_ptr<tsl::CoordinationServiceInterface> coordination_service_;
std::unique_ptr<tsl::CoordinationServiceAgent> coordination_service_agent_;
bool is_logging_active_ = false;
const WorkerCacheFactory worker_cache_factory_;
tsl::CoordinationServiceRpcHandler* coordination_handler_ = nullptr;
Status WorkerSessionForSessionLocked(
const std::string& session_handle,
std::shared_ptr<WorkerSession>* out_session)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
mutex mu_;
std::map<std::string, std::shared_ptr<WorkerSession>> sessions_
TF_GUARDED_BY(mu_);
struct MasterAssociatedSession {
const int64_t master_incarnation;
const std::string session_handle;
};
std::unordered_multimap<std::string, MasterAssociatedSession>
master_to_associated_sessions_ TF_GUARDED_BY(mu_);
};
}
#endif
#include "tensorflow/core/distributed_runtime/session_mgr.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/tsl/distributed_runtime/coordination/coordination_service.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "tensorflow/core/activity_watcher/activity.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/distributed_runtime/cluster_function_library_runtime.h"
#include "tensorflow/core/distributed_runtime/error_payloads.h"
#include "tensorflow/core/distributed_runtime/graph_mgr.h"
#include "tensorflow/core/distributed_runtime/remote_device.h"
#include "tensorflow/core/distributed_runtime/worker_cache_wrapper.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/tensorflow_server.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/protobuf/coordination_config.pb.h"
#include "tsl/protobuf/coordination_service.pb.h"
#include "tsl/protobuf/distributed_runtime_payloads.pb.h"
namespace tensorflow {
namespace {
bool IsMultiClientLeader(const ServerDef& server_def,
const CoordinationServiceConfig& config) {
DeviceNameUtils::ParsedName leader_pn;
DeviceNameUtils::ParseFullName(config.service_leader(), &leader_pn);
return server_def.job_name() == leader_pn.job &&
server_def.task_index() == leader_pn.task;
}
void SetCoordinationServiceLeader(const ServerDef& server_def,
CoordinationServiceConfig* config) {
const std::string& collective_leader = server_def.default_session_config()
.experimental()
.collective_group_leader();
if (!collective_leader.empty()) {
config->set_service_leader(collective_leader);
LOG(INFO) << "No coordination leader is set, using the collective leader "
<< collective_leader;
} else {
const std::string& default_leader =
strings::StrCat("/job:", server_def.job_name(), "/replica:0/task:0");
config->set_service_leader(default_leader);
LOG(INFO) << "No coordination leader is set, using the default leader "
<< default_leader;
}
}
void SetCoordinatedJobList(const ServerDef& server_def,
CoordinationServiceConfig* config) {
for (const auto& job : server_def.cluster().job()) {
tensorflow::CoordinatedJob* coordinated_job =
config->mutable_coordinated_job_list()->Add();
coordinated_job->set_name(job.name());
coordinated_job->set_num_tasks(job.tasks().size());
}
}
}
SessionMgr::SessionMgr(
WorkerEnv* worker_env, const std::string& default_worker_name,
std::unique_ptr<WorkerCacheInterface> default_worker_cache,
WorkerCacheFactory worker_cache_factory,
tsl::CoordinationServiceRpcHandler* coordination_handler)
: worker_env_(worker_env),
default_worker_cache_(std::move(default_worker_cache)),
legacy_session_(WorkerSession::CreateWithBorrowedDeviceMgr(
"", default_worker_name,
std::unique_ptr<WorkerCacheInterface>(
new WorkerCacheWrapper(default_worker_cache_.get())),
worker_env->device_mgr,
std::make_unique<GraphMgr>(worker_env, worker_env->device_mgr),
nullptr,
[](WorkerSession* worker_session, bool create_worker_session_called,
DeviceMgr* remote_device_mgr)
-> std::unique_ptr<DistributedFunctionLibraryRuntime> {
return std::make_unique<ClusterFunctionLibraryRuntime>(
worker_session, create_worker_session_called,
remote_device_mgr);
})),
worker_cache_factory_(std::move(worker_cache_factory)),
coordination_handler_(coordination_handler) {}
std::string SessionMgr::WorkerNameFromServerDef(const ServerDef& server_def) {
return strings::StrCat("/job:", server_def.job_name(),
"/replica:", server_def.replica(),
"/task:", server_def.task_index());
}
Status SessionMgr::CreateSession(const std::string& session,
const ServerDef& server_def,
bool isolate_session_state,
StatusCallback coordination_error_callback) {
return CreateSession(session, server_def, {}, isolate_session_state,
                     /*master_task=*/"", /*master_incarnation=*/0,
                     coordination_error_callback);
}
Status SessionMgr::CreateSession(
const std::string& session, const ServerDef& server_def,
const protobuf::RepeatedPtrField<DeviceAttributes>&
cluster_device_attributes,
bool isolate_session_state) {
return CreateSession(session, server_def, cluster_device_attributes,
                     isolate_session_state, /*master_task=*/"",
                     /*master_incarnation=*/0);
}
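// Full overload: if the same master task reappears with a new incarnation,
// garbage-collects the WorkerSessions created by its previous incarnation,
// then builds a new WorkerSession (either with isolated renamed devices or
// borrowing the worker's device manager) and, on first use, brings up the
// coordination service and agent when the session config enables them.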
Status SessionMgr::CreateSession(
const std::string& session, const ServerDef& server_def,
const protobuf::RepeatedPtrField<DeviceAttributes>&
cluster_device_attributes,
bool isolate_session_state, std::string master_task,
int64_t master_incarnation, StatusCallback coordination_error_callback) {
mutex_lock l(mu_);
if (session.empty()) {
return errors::InvalidArgument("Session must be non-empty.");
}
if (!master_task.empty()) {
auto it_range = master_to_associated_sessions_.equal_range(master_task);
if (it_range.first != it_range.second &&
it_range.first->second.master_incarnation != master_incarnation) {
LOG(INFO) << "When creating WorkerSession for master task " << master_task
<< ", found old WorkerSessions created by the same master task "
<< "with a different incarnation. These sessions will "
<< "be garbage collected. Current WorkerSession count: "
<< sessions_.size();
auto it = it_range.first;
while (it != it_range.second) {
auto session_it = sessions_.find(it->second.session_handle);
if (session_it != sessions_.end()) {
sessions_.erase(session_it);
}
it = master_to_associated_sessions_.erase(it);
}
}
}
WorkerCacheInterface* worker_cache = nullptr;
std::string worker_name;
if (server_def.cluster().job().empty()) {
worker_cache = new WorkerCacheWrapper(default_worker_cache_.get());
worker_name = legacy_session_->worker_name();
} else {
TF_RETURN_IF_ERROR(worker_cache_factory_(server_def, &worker_cache));
worker_name = WorkerNameFromServerDef(server_def);
}
if (worker_cache != nullptr && default_worker_cache_ != nullptr) {
worker_cache->SetLogging(this->is_logging_active_);
}
CHECK(worker_env_->device_mgr)
<< "The WorkerEnv must have a device manager.";
std::vector<Device*> local_devices = worker_env_->device_mgr->ListDevices();
CHECK(!local_devices.empty())
<< "The WorkerEnv must have at least one device in `local_devices`.";
std::shared_ptr<WorkerSession> worker_session;
std::vector<std::unique_ptr<Device>> cluster_devices;
if (isolate_session_state || server_def.cluster().job_size()) {
if (server_def.cluster().job_size()) {
VLOG(1) << "ClusterSpec propagation is enabled.";
}
if (!isolate_session_state) {
VLOG(1) << "Session state isolation is disabled.";
}
std::vector<std::unique_ptr<Device>> renamed_devices;
renamed_devices.reserve(local_devices.size());
for (Device* d : local_devices) {
renamed_devices.push_back(RenamedDevice::NewRenamedDevice(
    worker_name, d, /*owns_underlying=*/false, isolate_session_state));
}
auto device_mgr =
std::make_unique<StaticDeviceMgr>(std::move(renamed_devices));
LookupLocalDevice cb = [&device_mgr](StringPiece name, Device** device) {
return device_mgr->LookupDevice(name, device);
};
AsRemoteDevices(worker_env_->env, cluster_device_attributes, cb,
&cluster_devices);
std::unique_ptr<DynamicDeviceMgr> remote_devices;
if (!cluster_device_attributes.empty()) {
remote_devices = std::make_unique<DynamicDeviceMgr>();
TF_RETURN_IF_ERROR(
remote_devices->AddDevices(std::move(cluster_devices)));
}
auto graph_mgr = std::make_unique<GraphMgr>(worker_env_, device_mgr.get());
worker_session.reset(new WorkerSession(
session, worker_name,
std::unique_ptr<WorkerCacheInterface>(worker_cache),
std::move(device_mgr), std::move(graph_mgr), std::move(remote_devices),
[](WorkerSession* worker_session, bool create_worker_session_called,
DeviceMgr* remote_device_mgr)
-> std::unique_ptr<DistributedFunctionLibraryRuntime> {
return std::make_unique<ClusterFunctionLibraryRuntime>(
worker_session, create_worker_session_called, remote_device_mgr);
}));
} else {
AsRemoteDevices(worker_env_->env, cluster_device_attributes, nullptr,
&cluster_devices);
std::unique_ptr<DynamicDeviceMgr> remote_devices;
if (!cluster_device_attributes.empty()) {
remote_devices = std::make_unique<DynamicDeviceMgr>();
TF_RETURN_IF_ERROR(
remote_devices->AddDevices(std::move(cluster_devices)));
}
auto graph_mgr =
std::make_unique<GraphMgr>(worker_env_, worker_env_->device_mgr);
worker_session = WorkerSession::CreateWithBorrowedDeviceMgr(
session, worker_name,
std::unique_ptr<WorkerCacheInterface>(worker_cache),
worker_env_->device_mgr, std::move(graph_mgr),
std::move(remote_devices),
[](WorkerSession* worker_session, bool create_worker_session_called,
DeviceMgr* remote_device_mgr)
-> std::unique_ptr<DistributedFunctionLibraryRuntime> {
return std::make_unique<ClusterFunctionLibraryRuntime>(
worker_session, create_worker_session_called, remote_device_mgr);
});
}
sessions_.insert(std::make_pair(session, std::move(worker_session)));
if (!master_task.empty()) {
MasterAssociatedSession s{master_incarnation, session};
master_to_associated_sessions_.emplace(master_task, s);
}
CoordinationServiceConfig coordination_config =
server_def.default_session_config().experimental().coordination_config();
if (!coordination_config.service_type().empty() &&
!coordination_config.force_disable() &&
coordination_service_agent_ == nullptr) {
std::unique_ptr<CoordinationClientCache> client_cache;
TF_RETURN_IF_ERROR(worker_cache->GetCoordinationClientCache(&client_cache));
if (coordination_config.service_leader().empty()) {
SetCoordinationServiceLeader(server_def, &coordination_config);
}
if (coordination_config.coordinated_job_list().empty()) {
SetCoordinatedJobList(server_def, &coordination_config);
}
if (IsMultiClientLeader(server_def, coordination_config)) {
coordination_service_ =
tsl::CoordinationServiceInterface::EnableCoordinationService(
worker_env_->env, coordination_config, std::move(client_cache));
if (coordination_handler_ != nullptr) {
coordination_handler_->SetServiceInstance(coordination_service_.get());
}
}
std::unique_ptr<CoordinationClientCache> agent_cache;
TF_RETURN_IF_ERROR(worker_cache->GetCoordinationClientCache(&agent_cache));
coordination_service_agent_ = tsl::CreateCoordinationServiceAgent();
TF_RETURN_IF_ERROR(coordination_service_agent_->Initialize(
worker_env_->env, server_def.job_name(), server_def.task_index(),
coordination_config,
agent_cache->GetOwnedClient(coordination_config.service_leader()),
std::move(coordination_error_callback)));
activity_watcher::MaybeEnableMultiWorkersWatching(
coordination_service_agent_.get());
}
return absl::OkStatus();
}
void SessionMgr::ResetDefaultWorkerCache(WorkerCacheInterface* worker_cache) {
default_worker_cache_.reset(worker_cache);
}
Status SessionMgr::UpdateSession(
const std::string& session, const ServerDef& server_def,
const protobuf::RepeatedPtrField<DeviceAttributes>&
cluster_device_attributes) {
mutex_lock l(mu_);
if (session.empty()) {
return errors::InvalidArgument("Session must be non-empty.");
}
auto it = sessions_.find(session);
if (it == sessions_.end()) {
return errors::InvalidArgument("Cannot update session ", session,
" because it does not exist.");
}
std::shared_ptr<WorkerSession> worker_session = it->second;
WorkerCacheInterface* worker_cache = nullptr;
if (server_def.cluster().job().empty()) {
worker_cache = new WorkerCacheWrapper(default_worker_cache_.get());
} else {
TF_RETURN_IF_ERROR(worker_cache_factory_(server_def, &worker_cache));
}
std::vector<std::string> updated_remote_workers;
worker_cache->ListWorkers(&updated_remote_workers);
std::vector<std::unique_ptr<Device>> cluster_devices;
const DeviceMgr* local_device_mgr = worker_session->device_mgr();
DeviceMgr* remote_device_mgr = worker_session->remote_device_mgr();
std::vector<Device*> curr_remote_devices = remote_device_mgr->ListDevices();
std::vector<std::unique_ptr<Device>> added_remote_devices;
std::vector<Device*> removed_remote_devices;
std::vector<DeviceAttributes> added_cluster_device_attrs;
for (const auto& da : cluster_device_attributes) {
Device* device;
if (!local_device_mgr->LookupDevice(da.name(), &device).ok() &&
!remote_device_mgr->LookupDevice(da.name(), &device).ok()) {
added_cluster_device_attrs.emplace_back(da);
} else if (device != nullptr &&
device->attributes().incarnation() != da.incarnation()) {
removed_remote_devices.emplace_back(device);
added_cluster_device_attrs.emplace_back(da);
}
}
for (Device* device : curr_remote_devices) {
std::string task_name;
DeviceNameUtils::GetTaskName(device->parsed_name(), &task_name);
if (std::find(updated_remote_workers.begin(), updated_remote_workers.end(),
task_name) == updated_remote_workers.end()) {
removed_remote_devices.emplace_back(device);
}
}
protobuf::RepeatedPtrField<DeviceAttributes> added_cluster_device_attrs_pb(
added_cluster_device_attrs.begin(), added_cluster_device_attrs.end());
AsRemoteDevices(worker_env_->env, added_cluster_device_attrs_pb, nullptr,
&added_remote_devices);
TF_RETURN_IF_ERROR(worker_session->UpdateWorkerCacheAndDevices(
std::unique_ptr<WorkerCacheInterface>(worker_cache),
std::move(added_remote_devices), removed_remote_devices));
return absl::OkStatus();
}
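// Deleting an unknown handle is a no-op; the call is intentionally idempotent.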
Status SessionMgr::DeleteSession(const std::string& session) {
mutex_lock l(mu_);
auto it = sessions_.find(session);
if (it != sessions_.end()) {
sessions_.erase(it);
}
return absl::OkStatus();
}
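// Swaps the session map out under the lock and releases the WorkerSessions
// afterwards, so session teardown never runs while mu_ is held.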
Status SessionMgr::DeleteAllSessions() {
std::map<std::string, std::shared_ptr<WorkerSession>> tmp_sessions;
{
mutex_lock l(mu_);
swap(sessions_, tmp_sessions);
}
for (auto& session : tmp_sessions) {
session.second.reset();
}
return absl::OkStatus();
}
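// An empty handle resolves to the legacy session. An unknown handle yields an
// Aborted status tagged with the kWorkerPossiblyRestarted payload so callers
// can distinguish a restarted worker from other failures.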
Status SessionMgr::WorkerSessionForSessionLocked(
const std::string& session_handle,
std::shared_ptr<WorkerSession>* out_session) {
if (session_handle.empty()) {
*out_session = legacy_session_;
} else {
auto it = sessions_.find(session_handle);
if (it == sessions_.end()) {
return errors::AbortedWithPayloads(
strings::StrCat("Session handle is not found: ", session_handle,
". Possibly this worker (\"",
legacy_session_->worker_name(),
"\") just restarted."),
{{kWorkerPossiblyRestarted,
distributed_runtime::WorkerPossiblyRestarted()
.SerializeAsString()}});
} else {
*out_session = it->second;
}
}
return absl::OkStatus();
}
Status SessionMgr::WorkerSessionForSession(
const std::string& session_handle,
std::shared_ptr<WorkerSession>* out_session) {
mutex_lock l(mu_);
return WorkerSessionForSessionLocked(session_handle, out_session);
}
std::shared_ptr<WorkerSession> SessionMgr::LegacySession() {
return legacy_session_;
}
tsl::CoordinationServiceAgent* SessionMgr::GetCoordinationServiceAgent() {
return coordination_service_agent_.get();
}
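// Logging toggles fan out to the legacy session's worker cache and to every
// registered session's cache; RetrieveLogs and ClearLogs below follow the
// same fan-out pattern.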
void SessionMgr::SetLogging(bool active) {
mutex_lock l(mu_);
this->is_logging_active_ = active;
if (legacy_session_) {
auto* worker_cache = legacy_session_->worker_cache();
if (worker_cache) {
worker_cache->SetLogging(active);
}
}
for (const auto& session_kv : sessions_) {
auto session = session_kv.second.get();
if (session) {
auto* worker_cache = session->worker_cache();
if (worker_cache) {
worker_cache->SetLogging(active);
}
}
}
}
void SessionMgr::RetrieveLogs(int64_t step_id, LoggingResponse* response) {
mutex_lock l(mu_);
if (legacy_session_) {
auto* worker_cache = legacy_session_->worker_cache();
if (worker_cache) {
auto step_stats = StepStats();
if (worker_cache->RetrieveLogs(step_id, &step_stats)) {
auto* labeled_step_stats = response->add_step();
labeled_step_stats->set_step_id(step_id);
labeled_step_stats->mutable_step_stats()->Swap(&step_stats);
}
}
}
for (const auto& session_kv : sessions_) {
auto session = session_kv.second.get();
if (session) {
auto* worker_cache = session->worker_cache();
if (worker_cache) {
auto step_stats = StepStats();
if (worker_cache->RetrieveLogs(step_id, &step_stats)) {
auto* labeled_step_stats = response->add_step();
labeled_step_stats->set_step_id(step_id);
labeled_step_stats->mutable_step_stats()->Swap(&step_stats);
}
}
}
}
}
void SessionMgr::ClearLogs() {
mutex_lock l(mu_);
if (legacy_session_) {
auto* worker_cache = legacy_session_->worker_cache();
if (worker_cache) {
worker_cache->ClearLogs();
}
}
for (const auto& session_kv : sessions_) {
auto session = session_kv.second.get();
if (session) {
auto* worker_cache = session->worker_cache();
if (worker_cache) {
worker_cache->ClearLogs();
}
}
}
}
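// The teardown methods below simply drop the owning pointers to the
// coordination service and its agent, letting their destructors run.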
void SessionMgr::TeardownCoordinationService() {
coordination_service_ = nullptr;
}
void SessionMgr::TeardownCoordinationServiceAgent() {
coordination_service_agent_ = nullptr;
}
} | #include "tensorflow/core/distributed_runtime/session_mgr.h"
#include <string>
#include "absl/status/status.h"
#include "tensorflow/core/distributed_runtime/error_payloads.h"
#include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
namespace tensorflow {
class FakeDevice : public Device {
private:
explicit FakeDevice(const DeviceAttributes& device_attributes)
: Device(nullptr, device_attributes) {}
public:
Status Sync() override { return errors::Unimplemented("FakeDevice::Sync()"); }
Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; }
static std::unique_ptr<Device> MakeCPU(const std::string& name) {
DeviceAttributes device_attributes;
device_attributes.set_name(name);
device_attributes.set_device_type(DeviceType("FakeCPU").type());
return std::unique_ptr<Device>(new FakeDevice(device_attributes));
}
};
class SessionMgrTest : public ::testing::Test {
protected:
SessionMgrTest()
: mgr_(&env_, "/job:mnist/replica:0/task:0",
std::unique_ptr<WorkerCacheInterface>(), factory_,
nullptr) {
device_mgr_ = std::make_unique<DynamicDeviceMgr>(
FakeDevice::MakeCPU("/job:mnist/replica:0/task:0/device:fakecpu:0"));
env_.device_mgr = device_mgr_.get();
}
std::unique_ptr<DeviceMgr> device_mgr_;
WorkerEnv env_;
SessionMgr::WorkerCacheFactory factory_ =
[](const ServerDef& server_def, WorkerCacheInterface** worker_cache) {
*worker_cache = nullptr;
return absl::OkStatus();
};
SessionMgr mgr_;
};
TEST_F(SessionMgrTest, CreateSessionSimple) {
ServerDef server_def;
server_def.set_job_name("worker");
server_def.set_task_index(3);
std::string session_handle = "test_session_handle";
TF_EXPECT_OK(mgr_.CreateSession(session_handle, server_def, true));
std::shared_ptr<WorkerSession> session;
TF_EXPECT_OK(mgr_.WorkerSessionForSession(session_handle, &session));
  EXPECT_NE(nullptr, session) << "Session for " << session_handle << " was null";
EXPECT_NE(mgr_.LegacySession(), session);
TF_EXPECT_OK(mgr_.DeleteSession(session_handle));
}
TEST_F(SessionMgrTest, CreateSessionClusterDefWorkerName) {
ServerDef server_def;
server_def.set_job_name("worker");
server_def.set_task_index(3);
auto job = server_def.mutable_cluster()->add_job();
job->set_name("worker");
job->mutable_tasks()->insert({3, "localhost:3333"});
protobuf::RepeatedPtrField<DeviceAttributes> cluster_device_attributes;
DeviceAttributes* local_cpu = cluster_device_attributes.Add();
local_cpu->set_name("/job:worker/replica:0/task:3/device:fakecpu:0");
DeviceAttributes* remote_cpu = cluster_device_attributes.Add();
remote_cpu->set_name("/job:coordinator/replica:0/task:0/device:fakecpu:0");
std::string session_handle = "test_session_handle";
TF_EXPECT_OK(mgr_.CreateSession(session_handle, server_def,
cluster_device_attributes, true));
std::shared_ptr<WorkerSession> session;
TF_EXPECT_OK(mgr_.WorkerSessionForSession(session_handle, &session));
Device* device;
TF_EXPECT_OK(
session->remote_device_mgr()->LookupDevice(local_cpu->name(), &device));
EXPECT_TRUE(device->IsLocal());
  EXPECT_NE(nullptr, session) << "Session for " << session_handle << " was null";
EXPECT_EQ("/job:worker/replica:0/task:3", session->worker_name());
TF_EXPECT_OK(mgr_.DeleteSession(session_handle));
}
TEST_F(SessionMgrTest, CreateSessionDefaultWorkerName) {
ServerDef server_def;
std::string session_handle = "test_session_handle";
TF_EXPECT_OK(mgr_.CreateSession(session_handle, server_def, true));
std::shared_ptr<WorkerSession> session;
TF_EXPECT_OK(mgr_.WorkerSessionForSession(session_handle, &session));
  EXPECT_NE(nullptr, session) << "Session for " << session_handle << " was null";
EXPECT_EQ("/job:mnist/replica:0/task:0", session->worker_name());
TF_EXPECT_OK(mgr_.DeleteSession(session_handle));
}
TEST_F(SessionMgrTest, CreateSessionIsolateSessionState) {
ServerDef server_def;
server_def.set_job_name("worker");
server_def.set_task_index(3);
TF_EXPECT_OK(mgr_.CreateSession("handle_1", server_def, false));
std::shared_ptr<WorkerSession> session_1;
TF_EXPECT_OK(mgr_.WorkerSessionForSession("handle_1", &session_1));
std::vector<Device*> devices_1 = session_1->device_mgr()->ListDevices();
EXPECT_EQ(1, devices_1.size());
TF_EXPECT_OK(mgr_.CreateSession("handle_2", server_def, false));
std::shared_ptr<WorkerSession> session_2;
TF_EXPECT_OK(mgr_.WorkerSessionForSession("handle_2", &session_2));
std::vector<Device*> devices_2 = session_2->device_mgr()->ListDevices();
EXPECT_EQ(1, devices_2.size());
TF_EXPECT_OK(mgr_.CreateSession("handle_3", server_def, true));
std::shared_ptr<WorkerSession> session_3;
TF_EXPECT_OK(mgr_.WorkerSessionForSession("handle_3", &session_3));
std::vector<Device*> devices_3 = session_3->device_mgr()->ListDevices();
EXPECT_EQ(1, devices_3.size());
TF_EXPECT_OK(mgr_.CreateSession("handle_4", server_def, true));
std::shared_ptr<WorkerSession> session_4;
TF_EXPECT_OK(mgr_.WorkerSessionForSession("handle_4", &session_4));
std::vector<Device*> devices_4 = session_4->device_mgr()->ListDevices();
EXPECT_EQ(1, devices_4.size());
EXPECT_EQ(devices_1[0]->resource_manager(), devices_2[0]->resource_manager());
EXPECT_NE(devices_1[0]->resource_manager(), devices_3[0]->resource_manager());
EXPECT_NE(devices_1[0]->resource_manager(), devices_4[0]->resource_manager());
EXPECT_NE(devices_3[0]->resource_manager(), devices_4[0]->resource_manager());
}
TEST_F(SessionMgrTest, CreateSessionWithMasterName) {
ServerDef server_def;
server_def.set_job_name("worker");
server_def.set_task_index(3);
auto job = server_def.mutable_cluster()->add_job();
job->set_name("worker");
job->mutable_tasks()->insert({3, "localhost:3333"});
protobuf::RepeatedPtrField<DeviceAttributes> cluster_device_attributes;
const std::string master_name = "/job:master/replica:0/task:1";
const int64_t old_incarnation = random::New64();
const int64_t new_incarnation = random::New64();
std::string sess_handle1 = "test_session_handle_1";
TF_EXPECT_OK(mgr_.CreateSession(sess_handle1, server_def,
cluster_device_attributes, true, master_name,
old_incarnation));
std::string sess_handle2 = "test_session_handle_2";
TF_EXPECT_OK(mgr_.CreateSession(sess_handle2, server_def,
cluster_device_attributes, true, master_name,
old_incarnation));
std::shared_ptr<WorkerSession> session;
TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle1, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle1 << " was null";
  TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle2, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle2 << " was null";
std::string sess_handle3 = "test_session_handle_3";
TF_EXPECT_OK(mgr_.CreateSession(sess_handle3, server_def,
cluster_device_attributes, true, master_name,
new_incarnation));
EXPECT_NE(mgr_.WorkerSessionForSession(sess_handle1, &session),
absl::OkStatus())
<< "Session for " << sess_handle1
<< " should have been garbage collected.";
EXPECT_NE(mgr_.WorkerSessionForSession(sess_handle2, &session),
absl::OkStatus())
<< "Session for " << sess_handle2
<< " should have been garbage collected.";
TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle3, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle3 << " was null";
TF_EXPECT_OK(mgr_.DeleteSession(sess_handle2));
TF_EXPECT_OK(mgr_.DeleteSession(sess_handle3));
}
TEST_F(SessionMgrTest, CreateSessionWithoutMasterName) {
ServerDef server_def;
server_def.set_job_name("worker");
server_def.set_task_index(3);
auto job = server_def.mutable_cluster()->add_job();
job->set_name("worker");
job->mutable_tasks()->insert({3, "localhost:3333"});
protobuf::RepeatedPtrField<DeviceAttributes> cluster_device_attributes;
std::string sess_handle1 = "test_session_handle_no_master_1";
TF_EXPECT_OK(mgr_.CreateSession(sess_handle1, server_def,
cluster_device_attributes, true, "", 0));
std::string sess_handle2 = "test_session_handle_no_master_2";
TF_EXPECT_OK(mgr_.CreateSession(sess_handle2, server_def,
cluster_device_attributes, true, "", 0));
std::shared_ptr<WorkerSession> session;
TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle1, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle1 << " was null";
  TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle2, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle2 << " was null";
TF_EXPECT_OK(mgr_.DeleteSession(sess_handle1));
TF_EXPECT_OK(mgr_.DeleteSession(sess_handle2));
}
TEST_F(SessionMgrTest, LegacySession) {
std::string session_handle = "";
std::shared_ptr<WorkerSession> session;
TF_EXPECT_OK(mgr_.WorkerSessionForSession(session_handle, &session));
EXPECT_EQ(mgr_.LegacySession(), session);
TF_EXPECT_OK(mgr_.DeleteSession(session_handle));
}
TEST_F(SessionMgrTest, UnknownSessionHandle) {
std::string session_handle = "unknown_session_handle";
std::shared_ptr<WorkerSession> session;
Status s = mgr_.WorkerSessionForSession(session_handle, &session);
EXPECT_TRUE(absl::IsAborted(s));
EXPECT_TRUE(absl::StrContains(s.message(), "Session handle is not found"));
EXPECT_TRUE(s.GetPayload(kWorkerPossiblyRestarted).has_value());
}
TEST_F(SessionMgrTest, WorkerNameFromServerDef) {
ServerDef server_def;
server_def.set_job_name("worker");
server_def.set_task_index(3);
std::string worker_name = SessionMgr::WorkerNameFromServerDef(server_def);
EXPECT_EQ("/job:worker/replica:0/task:3", worker_name);
}
TEST_F(SessionMgrTest, DeleteLegacySession) {
TF_EXPECT_OK(mgr_.DeleteSession(""));
}
} |
1,292 | cpp | tensorflow/tensorflow | device_resolver_distributed | tensorflow/core/distributed_runtime/device_resolver_distributed.cc | tensorflow/core/distributed_runtime/device_resolver_distributed_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_DEVICE_RESOLVER_DISTRIBUTED_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_DEVICE_RESOLVER_DISTRIBUTED_H_
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
class DeviceMgr;
class WorkerCacheInterface;
class DeviceResolverDistributed : public DeviceResolverInterface {
public:
explicit DeviceResolverDistributed(const DeviceMgr* dev_mgr);
Status GetDeviceAttributes(const string& device,
DeviceAttributes* attributes) override;
Status GetAllDeviceAttributes(
const string& task, std::vector<DeviceAttributes>* attributes) override;
Status UpdateDeviceAttributes(
const std::vector<DeviceAttributes>& attributes) override;
protected:
const string task_name_;
mutex mu_;
absl::flat_hash_map<string, DeviceAttributes> attr_table_ TF_GUARDED_BY(mu_);
};
}
#endif
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
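// The resolver is seeded with every locally visible device; attributes for
// remote devices arrive later through UpdateDeviceAttributes().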
DeviceResolverDistributed::DeviceResolverDistributed(const DeviceMgr* dev_mgr) {
mutex_lock l(mu_);
for (Device* device : dev_mgr->ListDevices()) {
attr_table_[device->name()] = device->attributes();
}
}
Status DeviceResolverDistributed::GetDeviceAttributes(
const string& device, DeviceAttributes* attributes) {
mutex_lock l(mu_);
auto it = attr_table_.find(device);
if (it == attr_table_.end()) {
return errors::NotFound(device, " not found");
}
*attributes = it->second;
return absl::OkStatus();
}
Status DeviceResolverDistributed::GetAllDeviceAttributes(
const string& task, std::vector<DeviceAttributes>* attributes) {
mutex_lock l(mu_);
attributes->clear();
for (const auto& it : attr_table_) {
const string& device_name = it.first;
if (DeviceNameUtils::IsSameAddressSpace(task, device_name)) {
attributes->push_back(it.second);
}
}
if (attributes->empty()) {
return errors::NotFound(task, " not found in the cache");
}
return absl::OkStatus();
}
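// Re-caching a device name with a different incarnation is rejected, since a
// changed incarnation normally indicates a worker restart.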
Status DeviceResolverDistributed::UpdateDeviceAttributes(
const std::vector<DeviceAttributes>& attributes) {
mutex_lock l(mu_);
for (const DeviceAttributes& attr : attributes) {
auto item = attr_table_.insert({attr.name(), attr});
auto it = item.first;
bool success = item.second;
if (!success && it->second.incarnation() != attr.incarnation()) {
return errors::FailedPrecondition(
attr.name(),
"exists in cache with a different incarnation. "
"This usually means the remote worker has restarted");
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
using ::testing::Property;
using ::testing::UnorderedElementsAre;
std::unique_ptr<Device> NewDevice(const string& type, const string& name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
attr.set_incarnation(random::New64());
return std::make_unique<FakeDevice>(attr);
}
class DeviceResDistTest : public ::testing::Test {
protected:
void SetUp() override {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0"));
devices.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:1"));
dev_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
dev_resolver_ =
std::make_unique<DeviceResolverDistributed>(dev_mgr_.get());
std::vector<DeviceAttributes> attributes;
attributes.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:0")
->attributes());
attributes.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:1")
->attributes());
TF_ASSERT_OK(dev_resolver_->UpdateDeviceAttributes(attributes));
}
std::unique_ptr<DeviceMgr> dev_mgr_;
std::unique_ptr<DeviceResolverDistributed> dev_resolver_;
};
TEST_F(DeviceResDistTest, GetDeviceAttributesLocal) {
DeviceAttributes attributes;
TF_ASSERT_OK(dev_resolver_->GetDeviceAttributes(
"/job:worker/replica:0/task:0/device:CPU:0", &attributes));
EXPECT_EQ(attributes.name(), "/job:worker/replica:0/task:0/device:CPU:0");
}
TEST_F(DeviceResDistTest, GetDeviceAttributesLocalUnknown) {
DeviceAttributes attributes;
EXPECT_TRUE(errors::IsNotFound(dev_resolver_->GetDeviceAttributes(
"/job:worker/replica:0/task:0/device:CPU:9", &attributes)));
}
TEST_F(DeviceResDistTest, GetAllDeviceAttributes) {
std::vector<DeviceAttributes> attributes;
TF_ASSERT_OK(dev_resolver_->GetAllDeviceAttributes(
"/job:worker/replica:0/task:0", &attributes));
EXPECT_THAT(attributes,
UnorderedElementsAre(
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:0/device:CPU:0"),
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:0/device:CPU:1")));
TF_ASSERT_OK(dev_resolver_->GetAllDeviceAttributes(
"/job:worker/replica:0/task:1", &attributes));
EXPECT_THAT(attributes,
UnorderedElementsAre(
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:1/device:CPU:0"),
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:1/device:CPU:1")));
}
TEST_F(DeviceResDistTest, GetAllDeviceAttributesUnknown) {
std::vector<DeviceAttributes> attributes;
EXPECT_TRUE(errors::IsNotFound(dev_resolver_->GetAllDeviceAttributes(
"/job:worker/replica:0/task:3", &attributes)));
}
TEST_F(DeviceResDistTest, UpdateDeviceAttributes) {
std::vector<DeviceAttributes> attributes;
attributes.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0")
->attributes());
attributes.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:1")
->attributes());
TF_ASSERT_OK(dev_resolver_->UpdateDeviceAttributes(attributes));
TF_ASSERT_OK(dev_resolver_->GetAllDeviceAttributes(
"/job:worker/replica:0/task:2", &attributes));
EXPECT_THAT(attributes,
UnorderedElementsAre(
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:2/device:CPU:0"),
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:2/device:CPU:1")));
TF_ASSERT_OK(dev_resolver_->GetAllDeviceAttributes(
"/job:worker/replica:0/task:0", &attributes));
EXPECT_THAT(attributes,
UnorderedElementsAre(
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:0/device:CPU:0"),
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:0/device:CPU:1")));
}
TEST_F(DeviceResDistTest, UpdateDeviceAttributesExisting) {
std::vector<DeviceAttributes> attributes;
TF_ASSERT_OK(dev_resolver_->GetAllDeviceAttributes(
"/job:worker/replica:0/task:0", &attributes));
TF_ASSERT_OK(dev_resolver_->UpdateDeviceAttributes(attributes));
}
TEST_F(DeviceResDistTest, UpdateDeviceAttributesDifferentIncarnation) {
std::vector<DeviceAttributes> attributes;
attributes.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0")
->attributes());
attributes.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:1")
->attributes());
EXPECT_TRUE(errors::IsFailedPrecondition(
dev_resolver_->UpdateDeviceAttributes(attributes)));
}
}
} |
1,293 | cpp | tensorflow/tensorflow | message_wrappers | tensorflow/core/distributed_runtime/message_wrappers.cc | tensorflow/core/distributed_runtime/message_wrappers_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_MESSAGE_WRAPPERS_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_MESSAGE_WRAPPERS_H_
#include "absl/status/status.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/master.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
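// The wrappers below expose one interface over two transports: "InMemory"
// variants hold tensors directly for intra-process master/worker calls, while
// the proto-backed variants serialize through RunStep/RunGraph protos for RPC.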
class RunStepRequestWrapper {
public:
virtual ~RunStepRequestWrapper() {}
virtual const string& session_handle() const = 0;
virtual const string& partial_run_handle() const = 0;
virtual size_t num_feeds() const = 0;
virtual const string& feed_name(size_t i) const = 0;
virtual Status FeedValue(size_t i, Tensor* out_tensor) const = 0;
virtual Status FeedValue(size_t i, TensorProto* out_tensor) const = 0;
virtual size_t num_fetches() const = 0;
virtual const string& fetch_name(size_t i) const = 0;
virtual size_t num_targets() const = 0;
virtual const string& target_name(size_t i) const = 0;
virtual const RunOptions& options() const = 0;
virtual bool store_errors_in_response_body() const = 0;
virtual int64_t request_id() const = 0;
virtual string DebugString() const = 0;
virtual const RunStepRequest& ToProto() const = 0;
};
class MutableRunStepRequestWrapper : public RunStepRequestWrapper {
public:
virtual void set_session_handle(const string& handle) = 0;
virtual void set_partial_run_handle(const string& handle) = 0;
virtual void add_feed(const string& name, const Tensor& value) = 0;
virtual void add_fetch(const string& name) = 0;
virtual void add_target(const string& name) = 0;
virtual RunOptions* mutable_options() = 0;
virtual void set_store_errors_in_response_body(bool store_errors) = 0;
};
class InMemoryRunStepRequest : public MutableRunStepRequestWrapper {
public:
const string& session_handle() const override;
const string& partial_run_handle() const override;
size_t num_feeds() const override;
const string& feed_name(size_t i) const override;
Status FeedValue(size_t i, Tensor* out_tensor) const override;
Status FeedValue(size_t i, TensorProto* out_tensor) const override;
size_t num_fetches() const override;
const string& fetch_name(size_t i) const override;
size_t num_targets() const override;
const string& target_name(size_t i) const override;
const RunOptions& options() const override;
string DebugString() const override;
const RunStepRequest& ToProto() const override;
bool store_errors_in_response_body() const override;
int64_t request_id() const override;
void set_session_handle(const string& handle) override;
void set_partial_run_handle(const string& handle) override;
void add_feed(const string& name, const Tensor& value) override;
void add_fetch(const string& name) override;
void add_target(const string& name) override;
RunOptions* mutable_options() override;
void set_store_errors_in_response_body(bool store_errors) override;
private:
string session_handle_;
string partial_run_handle_;
gtl::InlinedVector<std::pair<string, Tensor>, 4> feeds_;
gtl::InlinedVector<string, 4> fetches_;
gtl::InlinedVector<string, 4> targets_;
RunOptions options_;
bool store_errors_in_response_body_ = false;
mutable std::unique_ptr<RunStepRequest> proto_version_;
};
class MutableProtoRunStepRequest : public MutableRunStepRequestWrapper {
public:
const string& session_handle() const override;
const string& partial_run_handle() const override;
size_t num_feeds() const override;
const string& feed_name(size_t i) const override;
Status FeedValue(size_t i, Tensor* out_tensor) const override;
Status FeedValue(size_t i, TensorProto* out_tensor) const override;
size_t num_fetches() const override;
const string& fetch_name(size_t i) const override;
size_t num_targets() const override;
const string& target_name(size_t i) const override;
const RunOptions& options() const override;
string DebugString() const override;
const RunStepRequest& ToProto() const override;
bool store_errors_in_response_body() const override;
int64_t request_id() const override;
void set_session_handle(const string& handle) override;
void set_partial_run_handle(const string& handle) override;
void add_feed(const string& name, const Tensor& value) override;
void add_fetch(const string& name) override;
void add_target(const string& name) override;
RunOptions* mutable_options() override;
void set_store_errors_in_response_body(bool store_errors) override;
private:
RunStepRequest request_;
friend class MasterInterface;
};
class ProtoRunStepRequest : public RunStepRequestWrapper {
public:
ProtoRunStepRequest(const RunStepRequest* request);
const string& session_handle() const override;
const string& partial_run_handle() const override;
size_t num_feeds() const override;
const string& feed_name(size_t i) const override;
Status FeedValue(size_t i, Tensor* out_tensor) const override;
Status FeedValue(size_t i, TensorProto* out_tensor) const override;
size_t num_fetches() const override;
const string& fetch_name(size_t i) const override;
size_t num_targets() const override;
const string& target_name(size_t i) const override;
const RunOptions& options() const override;
string DebugString() const override;
const RunStepRequest& ToProto() const override;
bool store_errors_in_response_body() const override;
int64_t request_id() const override;
private:
const RunStepRequest* const request_;
};
class RunGraphRequestWrapper {
public:
virtual ~RunGraphRequestWrapper() {}
virtual const string& session_handle() const = 0;
virtual bool create_worker_session_called() const = 0;
virtual const string& graph_handle() const = 0;
virtual int64_t step_id() const = 0;
virtual const ExecutorOpts& exec_opts() const = 0;
virtual size_t num_sends() const = 0;
virtual const string& send_key(size_t i) const = 0;
virtual Status SendValue(size_t i, Tensor* out_tensor) const = 0;
virtual size_t num_recvs() const = 0;
virtual const string& recv_key(size_t i) const = 0;
virtual bool is_partial() const = 0;
virtual bool is_last_partial_run() const = 0;
virtual bool store_errors_in_response_body() const = 0;
virtual int64_t request_id() const = 0;
virtual const RunGraphRequest& ToProto() const = 0;
};
class MutableRunGraphRequestWrapper : public RunGraphRequestWrapper {
public:
virtual void set_session_handle(const string& handle) = 0;
virtual void set_create_worker_session_called(bool called) = 0;
virtual void set_graph_handle(const string& handle) = 0;
virtual void set_step_id(int64_t step_id) = 0;
virtual ExecutorOpts* mutable_exec_opts() = 0;
virtual Status AddSendFromRunStepRequest(
const RunStepRequestWrapper& run_step_request, size_t i,
const string& send_key) = 0;
virtual Status AddSendFromRunCallableRequest(
const RunCallableRequest& run_callable_request, size_t i,
const string& send_key) = 0;
virtual void add_recv_key(const string& recv_key) = 0;
virtual void set_is_partial(bool is_partial) = 0;
virtual void set_is_last_partial_run(bool is_last_partial_run) = 0;
virtual void set_store_errors_in_response_body(bool store_errors) = 0;
virtual void set_request_id(int64_t request_id) = 0;
};
class InMemoryRunGraphRequest : public MutableRunGraphRequestWrapper {
public:
const string& session_handle() const override;
const string& graph_handle() const override;
bool create_worker_session_called() const override;
int64_t step_id() const override;
const ExecutorOpts& exec_opts() const override;
size_t num_sends() const override;
const string& send_key(size_t i) const override;
Status SendValue(size_t i, Tensor* out_tensor) const override;
size_t num_recvs() const override;
const string& recv_key(size_t i) const override;
bool is_partial() const override;
bool is_last_partial_run() const override;
const RunGraphRequest& ToProto() const override;
bool store_errors_in_response_body() const override;
int64_t request_id() const override;
void set_session_handle(const string& handle) override;
void set_create_worker_session_called(bool called) override;
void set_graph_handle(const string& handle) override;
void set_step_id(int64_t step_id) override;
ExecutorOpts* mutable_exec_opts() override;
Status AddSendFromRunStepRequest(
const RunStepRequestWrapper& run_step_request, size_t i,
const string& send_key) override;
Status AddSendFromRunCallableRequest(
const RunCallableRequest& run_callable_request, size_t i,
const string& send_key) override;
void add_recv_key(const string& recv_key) override;
void set_is_partial(bool is_partial) override;
void set_is_last_partial_run(bool is_last_partial_run) override;
void set_store_errors_in_response_body(bool store_errors) override;
void set_request_id(int64_t request_id) override;
private:
string session_handle_;
bool create_worker_session_called_ = false;
string graph_handle_;
int64_t step_id_;
ExecutorOpts exec_opts_;
gtl::InlinedVector<std::pair<string, Tensor>, 4> sends_;
gtl::InlinedVector<string, 4> recvs_;
bool is_partial_ = false;
bool is_last_partial_run_ = false;
bool store_errors_in_response_body_ = false;
int64_t request_id_ = 0;
mutable std::unique_ptr<RunGraphRequest> proto_version_;
};
class MutableProtoRunGraphRequest : public MutableRunGraphRequestWrapper {
public:
const string& session_handle() const override;
bool create_worker_session_called() const override;
const string& graph_handle() const override;
int64_t step_id() const override;
const ExecutorOpts& exec_opts() const override;
size_t num_sends() const override;
const string& send_key(size_t i) const override;
Status SendValue(size_t i, Tensor* out_tensor) const override;
size_t num_recvs() const override;
const string& recv_key(size_t i) const override;
bool is_partial() const override;
bool is_last_partial_run() const override;
bool store_errors_in_response_body() const override;
int64_t request_id() const override;
const RunGraphRequest& ToProto() const override;
void set_session_handle(const string& handle) override;
void set_create_worker_session_called(bool called) override;
void set_graph_handle(const string& handle) override;
void set_step_id(int64_t step_id) override;
ExecutorOpts* mutable_exec_opts() override;
Status AddSendFromRunStepRequest(
const RunStepRequestWrapper& run_step_request, size_t i,
const string& send_key) override;
Status AddSendFromRunCallableRequest(
const RunCallableRequest& run_callable_request, size_t i,
const string& send_key) override;
void add_recv_key(const string& recv_key) override;
void set_is_partial(bool is_partial) override;
void set_is_last_partial_run(bool is_last_partial_run) override;
void set_store_errors_in_response_body(bool store_errors) override;
void set_request_id(int64_t request_id) override;
private:
RunGraphRequest request_;
};
class ProtoRunGraphRequest : public RunGraphRequestWrapper {
public:
ProtoRunGraphRequest(const RunGraphRequest* request);
const string& session_handle() const override;
bool create_worker_session_called() const override;
const string& graph_handle() const override;
int64_t step_id() const override;
const ExecutorOpts& exec_opts() const override;
size_t num_sends() const override;
const string& send_key(size_t i) const override;
Status SendValue(size_t i, Tensor* out_tensor) const override;
size_t num_recvs() const override;
const string& recv_key(size_t i) const override;
bool is_partial() const override;
bool is_last_partial_run() const override;
bool store_errors_in_response_body() const override;
int64_t request_id() const override;
const RunGraphRequest& ToProto() const override;
private:
const RunGraphRequest* const request_;
};
class MutableRunGraphResponseWrapper {
public:
virtual ~MutableRunGraphResponseWrapper() {}
virtual size_t num_recvs() const = 0;
virtual const string& recv_key(size_t i) const = 0;
virtual Status RecvValue(size_t i, TensorProto* out_tensor) = 0;
virtual Status RecvValue(size_t i, Tensor* out_tensor) = 0;
virtual void AddRecv(const string& key, const Tensor& value) = 0;
virtual StepStats* mutable_step_stats() = 0;
virtual CostGraphDef* mutable_cost_graph() = 0;
virtual size_t num_partition_graphs() const = 0;
virtual GraphDef* mutable_partition_graph(size_t i) = 0;
virtual void AddPartitionGraph(const GraphDef& partition_graph) = 0;
virtual Status status() const = 0;
virtual absl::StatusCode status_code() const = 0;
virtual void set_status(const Status& status) = 0;
protected:
virtual RunGraphResponse* get_proto() = 0;
friend class WorkerInterface;
};
class InMemoryRunGraphResponse : public MutableRunGraphResponseWrapper {
public:
size_t num_recvs() const override;
const string& recv_key(size_t i) const override;
Status RecvValue(size_t i, TensorProto* out_tensor) override;
Status RecvValue(size_t i, Tensor* out_tensor) override;
void AddRecv(const string& key, const Tensor& value) override;
StepStats* mutable_step_stats() override;
CostGraphDef* mutable_cost_graph() override;
size_t num_partition_graphs() const override;
GraphDef* mutable_partition_graph(size_t i) override;
void AddPartitionGraph(const GraphDef& partition_graph) override;
Status status() const override;
absl::StatusCode status_code() const override;
void set_status(const Status& status) override;
protected:
RunGraphResponse* get_proto() override;
private:
gtl::InlinedVector<std::pair<string, Tensor>, 4> recvs_;
StepStats step_stats_;
CostGraphDef cost_graph_;
std::vector<GraphDef> partition_graphs_;
Status status_;
};
class OwnedProtoRunGraphResponse : public MutableRunGraphResponseWrapper {
public:
size_t num_recvs() const override;
const string& recv_key(size_t i) const override;
Status RecvValue(size_t i, TensorProto* out_tensor) override;
Status RecvValue(size_t i, Tensor* out_tensor) override;
void AddRecv(const string& key, const Tensor& value) override;
StepStats* mutable_step_stats() override;
CostGraphDef* mutable_cost_graph() override;
size_t num_partition_graphs() const override;
GraphDef* mutable_partition_graph(size_t i) override;
void AddPartitionGraph(const GraphDef& partition_graph) override;
Status status() const override;
absl::StatusCode status_code() const override;
void set_status(const Status& status) override;
protected:
RunGraphResponse* get_proto() override;
private:
RunGraphResponse response_;
};
class NonOwnedProtoRunGraphResponse : public MutableRunGraphResponseWrapper {
public:
NonOwnedProtoRunGraphResponse(RunGraphResponse* response);
size_t num_recvs() const override;
const string& recv_key(size_t i) const override;
Status RecvValue(size_t i, TensorProto* out_tensor) override;
Status RecvValue(size_t i, Tensor* out_tensor) override;
void AddRecv(const string& key, const Tensor& value) override;
StepStats* mutable_step_stats() override;
CostGraphDef* mutable_cost_graph() override;
size_t num_partition_graphs() const override;
GraphDef* mutable_partition_graph(size_t i) override;
void AddPartitionGraph(const GraphDef& partition_graph) override;
Status status() const override;
absl::StatusCode status_code() const override;
void set_status(const Status& status) override;
protected:
RunGraphResponse* get_proto() override;
private:
RunGraphResponse* const response_;
};
class MutableRunStepResponseWrapper {
public:
virtual ~MutableRunStepResponseWrapper();
virtual size_t num_tensors() const = 0;
virtual const string& tensor_name(size_t i) const = 0;
virtual Status TensorValue(size_t i, Tensor* out_tensor) const = 0;
virtual Status AddTensorFromRunGraphResponse(
const string& name, MutableRunGraphResponseWrapper* run_graph_response,
size_t i) = 0;
virtual const RunMetadata& metadata() const = 0;
virtual RunMetadata* mutable_metadata() = 0;
virtual Status status() const = 0;
virtual absl::StatusCode status_code() const = 0;
virtual void set_status(const Status& status) = 0;
protected:
virtual RunStepResponse* get_proto() = 0;
friend class MasterInterface;
};
class InMemoryRunStepResponse : public MutableRunStepResponseWrapper {
public:
size_t num_tensors() const override;
const string& tensor_name(size_t i) const override;
Status TensorValue(size_t i, Tensor* out_tensor) const override;
Status AddTensorFromRunGraphResponse(
const string& name, MutableRunGraphResponseWrapper* run_graph_response,
size_t i) override;
const RunMetadata& metadata() const override;
RunMetadata* mutable_metadata() override;
Status status() const override;
absl::StatusCode status_code() const override;
void set_status(const Status& status) override;
protected:
RunStepResponse* get_proto() override;
private:
gtl::InlinedVector<std::pair<string, Tensor>, 4> tensors_;
RunMetadata metadata_;
Status status_;
};
class OwnedProtoRunStepResponse : public MutableRunStepResponseWrapper {
public:
size_t num_tensors() const override;
const string& tensor_name(size_t i) const override;
Status TensorValue(size_t i, Tensor* out_tensor) const override;
Status AddTensorFromRunGraphResponse(
const string& name, MutableRunGraphResponseWrapper* run_graph_response,
size_t i) override;
const RunMetadata& metadata() const override;
RunMetadata* mutable_metadata() override;
Status status() const override;
absl::StatusCode status_code() const override;
void set_status(const Status& status) override;
protected:
RunStepResponse* get_proto() override;
private:
RunStepResponse response_;
};
class NonOwnedProtoRunStepResponse : public MutableRunStepResponseWrapper {
public:
NonOwnedProtoRunStepResponse(RunStepResponse* response);
size_t num_tensors() const override;
const string& tensor_name(size_t i) const override;
Status TensorValue(size_t i, Tensor* out_tensor) const override;
Status AddTensorFromRunGraphResponse(
const string& name, MutableRunGraphResponseWrapper* run_graph_response,
size_t i) override;
const RunMetadata& metadata() const override;
RunMetadata* mutable_metadata() override;
Status status() const override;
absl::StatusCode status_code() const override;
void set_status(const Status& status) override;
protected:
RunStepResponse* get_proto() override;
private:
RunStepResponse* response_;
};
bool ParseTensorProtoToTensor(const TensorProto& tensor_proto,
Tensor* out_tensor);
}
#endif
#include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include "absl/status/status.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/named_tensor.pb.h"
namespace tensorflow {
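// Returns true iff tensor_proto carries a valid dtype and parses successfully
// into a Tensor backed by the CPU allocator.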
bool ParseTensorProtoToTensor(const TensorProto& tensor_proto,
Tensor* out_tensor) {
if (tensor_proto.dtype() > 0 && tensor_proto.dtype() <= DataType_MAX) {
Tensor parsed(tensor_proto.dtype());
if (parsed.FromProto(cpu_allocator(), tensor_proto)) {
*out_tensor = parsed;
return true;
}
}
return false;
}
const string& InMemoryRunStepRequest::session_handle() const {
return session_handle_;
}
void InMemoryRunStepRequest::set_session_handle(const string& handle) {
session_handle_ = handle;
}
const string& InMemoryRunStepRequest::partial_run_handle() const {
return partial_run_handle_;
}
void InMemoryRunStepRequest::set_partial_run_handle(const string& handle) {
partial_run_handle_ = handle;
}
size_t InMemoryRunStepRequest::num_feeds() const { return feeds_.size(); }
const string& InMemoryRunStepRequest::feed_name(size_t i) const {
return feeds_[i].first;
}
Status InMemoryRunStepRequest::FeedValue(size_t i, Tensor* out_tensor) const {
*out_tensor = feeds_[i].second;
return absl::OkStatus();
}
Status InMemoryRunStepRequest::FeedValue(size_t i,
                                         TensorProto* out_tensor) const {
  // Assumed completion of the truncated overload, reconstructed from the
  // declaration above: serialize the stored feed tensor into the proto.
  feeds_[i].second.AsProtoTensorContent(out_tensor);
  return absl::OkStatus();
} | #include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
namespace {
Tensor TensorA() {
Tensor a_tensor(DT_INT32, TensorShape({2, 2}));
test::FillValues<int32>(&a_tensor, {3, 2, -1, 0});
return a_tensor;
}
Tensor TensorB() {
Tensor b_tensor(DT_INT32, TensorShape({1, 2}));
test::FillValues<int32>(&b_tensor, {1, 2});
return b_tensor;
}
void BuildRunStepRequest(MutableRunStepRequestWrapper* request) {
request->set_session_handle("handle");
request->set_partial_run_handle("partial_handle");
request->add_feed("feed_a:0", TensorA());
request->add_feed("feed_b:0", TensorB());
request->add_fetch("fetch_x:0");
request->add_fetch("fetch_y:0");
request->add_target("target_i");
request->add_target("target_j");
request->mutable_options()->set_timeout_in_ms(37);
}
void CheckRunStepRequest(const RunStepRequestWrapper& request) {
EXPECT_EQ("handle", request.session_handle());
EXPECT_EQ("partial_handle", request.partial_run_handle());
EXPECT_EQ(2, request.num_feeds());
EXPECT_EQ("feed_a:0", request.feed_name(0));
EXPECT_EQ("feed_b:0", request.feed_name(1));
Tensor val;
TF_EXPECT_OK(request.FeedValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val);
TF_EXPECT_OK(request.FeedValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val);
EXPECT_EQ(2, request.num_fetches());
EXPECT_EQ("fetch_x:0", request.fetch_name(0));
EXPECT_EQ("fetch_y:0", request.fetch_name(1));
EXPECT_EQ("target_i", request.target_name(0));
EXPECT_EQ("target_j", request.target_name(1));
EXPECT_EQ(37, request.options().timeout_in_ms());
}
void BuildRunGraphRequest(const RunStepRequestWrapper& run_step_request,
MutableRunGraphRequestWrapper* run_graph_request) {
run_graph_request->set_graph_handle("graph_handle");
run_graph_request->set_step_id(13);
run_graph_request->mutable_exec_opts()->set_record_timeline(true);
TF_EXPECT_OK(run_graph_request->AddSendFromRunStepRequest(run_step_request, 0,
"send_0"));
TF_EXPECT_OK(run_graph_request->AddSendFromRunStepRequest(run_step_request, 1,
"send_1"));
run_graph_request->add_recv_key("recv_2");
run_graph_request->add_recv_key("recv_3");
run_graph_request->set_is_partial(true);
}
void CheckRunGraphRequest(const RunGraphRequestWrapper& request) {
EXPECT_EQ("graph_handle", request.graph_handle());
EXPECT_EQ(13, request.step_id());
EXPECT_FALSE(request.exec_opts().record_costs());
EXPECT_TRUE(request.exec_opts().record_timeline());
EXPECT_FALSE(request.exec_opts().record_partition_graphs());
EXPECT_EQ(2, request.num_sends());
Tensor val;
TF_EXPECT_OK(request.SendValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val);
TF_EXPECT_OK(request.SendValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val);
EXPECT_TRUE(request.is_partial());
EXPECT_FALSE(request.is_last_partial_run());
}
void BuildRunGraphResponse(MutableRunGraphResponseWrapper* run_graph_response) {
run_graph_response->AddRecv("recv_2", TensorA());
run_graph_response->AddRecv("recv_3", TensorB());
run_graph_response->mutable_step_stats()->add_dev_stats()->set_device(
"/cpu:0");
run_graph_response->mutable_cost_graph()->add_node()->set_name("cost_node");
GraphDef graph_def;
graph_def.mutable_versions()->set_producer(1234);
graph_def.mutable_versions()->set_min_consumer(1234);
run_graph_response->AddPartitionGraph(graph_def);
}
void CheckRunGraphResponse(MutableRunGraphResponseWrapper* response) {
ASSERT_EQ(2, response->num_recvs());
EXPECT_EQ("recv_2", response->recv_key(0));
EXPECT_EQ("recv_3", response->recv_key(1));
Tensor val;
TF_EXPECT_OK(response->RecvValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val);
TF_EXPECT_OK(response->RecvValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val);
ASSERT_EQ(1, response->mutable_step_stats()->dev_stats_size());
EXPECT_EQ("/cpu:0", response->mutable_step_stats()->dev_stats(0).device());
ASSERT_EQ(1, response->mutable_cost_graph()->node_size());
EXPECT_EQ("cost_node", response->mutable_cost_graph()->node(0).name());
ASSERT_EQ(1, response->num_partition_graphs());
EXPECT_EQ(1234, response->mutable_partition_graph(0)->versions().producer());
EXPECT_EQ(1234,
response->mutable_partition_graph(0)->versions().min_consumer());
}
void BuildRunStepResponse(MutableRunGraphResponseWrapper* run_graph_response,
MutableRunStepResponseWrapper* run_step_response) {
TF_EXPECT_OK(run_step_response->AddTensorFromRunGraphResponse(
"fetch_x:0", run_graph_response, 0));
TF_EXPECT_OK(run_step_response->AddTensorFromRunGraphResponse(
"fetch_y:0", run_graph_response, 1));
*run_step_response->mutable_metadata()->mutable_step_stats() =
*run_graph_response->mutable_step_stats();
protobuf::RepeatedPtrField<GraphDef>* partition_graph_defs =
run_step_response->mutable_metadata()->mutable_partition_graphs();
for (size_t i = 0; i < run_graph_response->num_partition_graphs(); i++) {
partition_graph_defs->Add()->Swap(
run_graph_response->mutable_partition_graph(i));
}
}
void CheckRunStepResponse(const MutableRunStepResponseWrapper& response) {
ASSERT_EQ(2, response.num_tensors());
EXPECT_EQ("fetch_x:0", response.tensor_name(0));
EXPECT_EQ("fetch_y:0", response.tensor_name(1));
Tensor val;
TF_EXPECT_OK(response.TensorValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val);
TF_EXPECT_OK(response.TensorValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val);
ASSERT_EQ(1, response.metadata().step_stats().dev_stats_size());
EXPECT_EQ("/cpu:0", response.metadata().step_stats().dev_stats(0).device());
ASSERT_EQ(1, response.metadata().partition_graphs_size());
EXPECT_EQ(1234,
response.metadata().partition_graphs(0).versions().producer());
EXPECT_EQ(1234,
response.metadata().partition_graphs(0).versions().min_consumer());
}
TEST(MessageWrappers, RunStepRequest_Basic) {
InMemoryRunStepRequest in_memory_request;
BuildRunStepRequest(&in_memory_request);
CheckRunStepRequest(in_memory_request);
MutableProtoRunStepRequest proto_request;
BuildRunStepRequest(&proto_request);
CheckRunStepRequest(proto_request);
CheckRunStepRequest(ProtoRunStepRequest(&in_memory_request.ToProto()));
CheckRunStepRequest(ProtoRunStepRequest(&proto_request.ToProto()));
}
TEST(MessageWrappers, RunGraphRequest_Basic) {
InMemoryRunStepRequest in_memory_run_step_request;
BuildRunStepRequest(&in_memory_run_step_request);
MutableProtoRunStepRequest mutable_proto_run_step_request;
BuildRunStepRequest(&mutable_proto_run_step_request);
ProtoRunStepRequest proto_run_step_request(
&mutable_proto_run_step_request.ToProto());
{
InMemoryRunGraphRequest request;
BuildRunGraphRequest(in_memory_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
InMemoryRunGraphRequest request;
BuildRunGraphRequest(mutable_proto_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
InMemoryRunGraphRequest request;
BuildRunGraphRequest(proto_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
MutableProtoRunGraphRequest request;
BuildRunGraphRequest(in_memory_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
MutableProtoRunGraphRequest request;
BuildRunGraphRequest(mutable_proto_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
MutableProtoRunGraphRequest request;
BuildRunGraphRequest(proto_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
}
TEST(MessageWrappers, RunGraphResponse_Basic) {
InMemoryRunGraphResponse in_memory_response;
BuildRunGraphResponse(&in_memory_response);
CheckRunGraphResponse(&in_memory_response);
OwnedProtoRunGraphResponse owned_proto_response;
BuildRunGraphResponse(&owned_proto_response);
CheckRunGraphResponse(&owned_proto_response);
RunGraphResponse response_proto;
NonOwnedProtoRunGraphResponse non_owned_proto_response(&response_proto);
BuildRunGraphResponse(&non_owned_proto_response);
CheckRunGraphResponse(&non_owned_proto_response);
}
TEST(MessageWrappers, RunStepResponse_Basic) {
{
InMemoryRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
InMemoryRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
InMemoryRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
OwnedProtoRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
InMemoryRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
RunStepResponse response_proto;
NonOwnedProtoRunStepResponse response(&response_proto);
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
OwnedProtoRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
InMemoryRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
OwnedProtoRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
OwnedProtoRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
OwnedProtoRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
RunStepResponse response_proto;
NonOwnedProtoRunStepResponse response(&response_proto);
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
RunGraphResponse run_graph_response_proto;
NonOwnedProtoRunGraphResponse run_graph_response(&run_graph_response_proto);
BuildRunGraphResponse(&run_graph_response);
InMemoryRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
RunGraphResponse run_graph_response_proto;
NonOwnedProtoRunGraphResponse run_graph_response(&run_graph_response_proto);
BuildRunGraphResponse(&run_graph_response);
OwnedProtoRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
RunGraphResponse run_graph_response_proto;
NonOwnedProtoRunGraphResponse run_graph_response(&run_graph_response_proto);
BuildRunGraphResponse(&run_graph_response);
RunStepResponse response_proto;
NonOwnedProtoRunStepResponse response(&response_proto);
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
}
}
} |
1,294 | cpp | tensorflow/tensorflow | rpc_collective_executor_mgr | tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.cc | tensorflow/core/distributed_runtime/rpc_collective_executor_mgr_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_COLLECTIVE_EXECUTOR_MGR_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_COLLECTIVE_EXECUTOR_MGR_H_
#include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "tensorflow/core/framework/collective.h"
namespace tensorflow {
class CollectiveParamResolverDistributed;
class ConfigProto;
class DeviceMgr;
class DeviceResolverDistributed;
class WorkerCacheInterface;
class StepSequenceRequest;
class StepSequenceResponse;
class RpcCollectiveExecutorMgr : public CollectiveExecutorMgr {
public:
RpcCollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* dev_mgr,
std::unique_ptr<DeviceResolverDistributed> dev_resolver,
std::unique_ptr<CollectiveParamResolverDistributed> param_resolver,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator,
WorkerCacheInterface* worker_cache, const string& task_name);
virtual ~RpcCollectiveExecutorMgr();
void GetStepSequenceAsync(const GetStepSequenceRequest* request,
GetStepSequenceResponse* response,
const StatusCallback& done) override;
void RefreshStepIdSequenceAsync(int64_t graph_key,
const StatusCallback& done) override;
int64_t NextStepId(int64_t graph_key) override;
void RetireStepId(int64_t graph_key, int64_t step_id) override;
protected:
virtual CollectiveExecutor* Create(int64_t step_id) override;
WorkerCacheInterface* const worker_cache_;
const string task_name_;
string group_leader_;
friend class RpcCollectiveExecutorMgrTest;
private:
Status UpdateStepSequences(const GetStepSequenceResponse& resp);
struct GraphKeySequence {
explicit GraphKeySequence(int64_t k)
: graph_key_(k), next_step_id_(CollectiveExecutor::kInvalidId) {}
const int64_t graph_key_;
int64_t next_step_id_;
};
mutex sequence_mu_;
gtl::FlatMap<int64_t, GraphKeySequence*> sequence_table_
TF_GUARDED_BY(sequence_mu_);
};
std::unique_ptr<RpcCollectiveExecutorMgr> CreateProdRpcCollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* device_mgr,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator,
WorkerCacheInterface* worker_cache, const string& default_worker_name);
}
#endif
#include "tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/collective_rma_distributed.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
RpcCollectiveExecutorMgr::RpcCollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* dev_mgr,
std::unique_ptr<DeviceResolverDistributed> dev_resolver,
std::unique_ptr<CollectiveParamResolverDistributed> param_resolver,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator,
WorkerCacheInterface* worker_cache, const string& task_name)
: CollectiveExecutorMgr(config, dev_mgr, std::move(dev_resolver),
std::move(param_resolver),
std::move(nccl_communicator)),
worker_cache_(worker_cache),
task_name_(task_name) {
group_leader_ = (task_name == config.experimental().collective_group_leader())
? ""
: config.experimental().collective_group_leader();
}
RpcCollectiveExecutorMgr::~RpcCollectiveExecutorMgr() {
for (auto it : sequence_table_) {
delete it.second;
}
}
CollectiveExecutor* RpcCollectiveExecutorMgr::Create(int64_t step_id) {
CollectiveRemoteAccessDistributed* rma =
new CollectiveRemoteAccessDistributed(dev_mgr_, dev_resolver_.get(),
work_queue_, worker_cache_, step_id,
task_name_);
return new BaseCollectiveExecutor(this, rma, step_id, dev_mgr_, work_queue_);
}
namespace {
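// The mask keeps the low 57 bits of a random step id, so a freshly minted id
// is always non-negative and the most-significant bits stay clear.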
static const int64_t kStepIdMask = (((1uLL << 56) - 1) | (1uLL << 56));
int64_t NewRandomStepId() {
int64_t step_id = random::New64();
step_id &= kStepIdMask;
return step_id;
}
}
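// If this task is the group leader (group_leader_ is empty), mint a fresh
// random step id for graph_key locally; otherwise fetch the authoritative
// sequence from the leader over RPC and merge it via UpdateStepSequences().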
void RpcCollectiveExecutorMgr::RefreshStepIdSequenceAsync(
int64_t graph_key, const StatusCallback& done) {
if (group_leader_.empty()) {
mutex_lock l(sequence_mu_);
GraphKeySequence* gks = nullptr;
auto it = sequence_table_.find(graph_key);
if (it == sequence_table_.end()) {
gks = new GraphKeySequence(graph_key);
sequence_table_[graph_key] = gks;
} else {
gks = it->second;
}
gks->next_step_id_ = NewRandomStepId();
done(absl::OkStatus());
} else {
WorkerInterface* wi = worker_cache_->GetOrCreateWorker(group_leader_);
GetStepSequenceRequest* req = new GetStepSequenceRequest;
GetStepSequenceResponse* resp = new GetStepSequenceResponse;
req->add_graph_key(graph_key);
wi->GetStepSequenceAsync(
req, resp, [this, req, resp, done](const Status& s) {
if (!s.ok()) {
LOG(ERROR) << "Bad response [" << s
<< "] from GetStepSequenceAsync call to "
<< group_leader_;
done(s);
} else {
done(UpdateStepSequences(*resp));
}
delete req;
delete resp;
});
}
}
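// Serves step-sequence queries from other workers. Only the group leader
// should receive these; a sequence entry is created on first use per graph
// key.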
void RpcCollectiveExecutorMgr::GetStepSequenceAsync(
const GetStepSequenceRequest* request, GetStepSequenceResponse* response,
const StatusCallback& done) {
if (!group_leader_.empty()) {
LOG(ERROR) << "GetStepSequence called at non-group-leader";
done(errors::Internal("GetStepSequenceAsync called at non-group-leader"));
} else {
mutex_lock l(sequence_mu_);
for (int64_t graph_key : request->graph_key()) {
auto it = sequence_table_.find(graph_key);
GraphKeySequence* gks = nullptr;
if (it == sequence_table_.end()) {
gks = new GraphKeySequence(graph_key);
gks->next_step_id_ = NewRandomStepId();
sequence_table_[graph_key] = gks;
} else {
gks = it->second;
}
StepSequence* ss = response->add_step_sequence();
ss->set_graph_key(graph_key);
ss->set_next_step_id(gks->next_step_id_);
}
done(absl::OkStatus());
}
}
Status RpcCollectiveExecutorMgr::UpdateStepSequences(
const GetStepSequenceResponse& resp) {
mutex_lock l(sequence_mu_);
for (const StepSequence& ss : resp.step_sequence()) {
GraphKeySequence* gks = nullptr;
auto it = sequence_table_.find(ss.graph_key());
if (it == sequence_table_.end()) {
gks = new GraphKeySequence(ss.graph_key());
sequence_table_[ss.graph_key()] = gks;
} else {
gks = it->second;
}
gks->next_step_id_ = ss.next_step_id();
}
return absl::OkStatus();
}
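// Returns the cached next step id for graph_key, or kInvalidId if no
// sequence has been established yet.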
int64_t RpcCollectiveExecutorMgr::NextStepId(int64_t graph_key) {
mutex_lock l(sequence_mu_);
auto it = sequence_table_.find(graph_key);
if (it != sequence_table_.end()) {
return it->second->next_step_id_;
}
return CollectiveExecutor::kInvalidId;
}
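// Advances the sequence when the retired id matches the expected next id;
// any mismatch invalidates the sequence so it must be refreshed.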
void RpcCollectiveExecutorMgr::RetireStepId(int64_t graph_key,
int64_t step_id) {
mutex_lock l(sequence_mu_);
auto it = sequence_table_.find(graph_key);
if (it != sequence_table_.end()) {
if (step_id == it->second->next_step_id_) {
it->second->next_step_id_ = (it->second->next_step_id_ + 1) & kStepIdMask;
} else {
it->second->next_step_id_ = CollectiveExecutor::kInvalidId;
}
} else {
LOG(ERROR) << "Failed to find graph_key " << graph_key << " to retire.";
}
}
std::unique_ptr<RpcCollectiveExecutorMgr> CreateProdRpcCollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* device_mgr,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator,
WorkerCacheInterface* worker_cache, const string& default_worker_name) {
auto dev_resolver = std::make_unique<DeviceResolverDistributed>(device_mgr);
auto param_resolver = std::make_unique<CollectiveParamResolverDistributed>(
config, device_mgr, dev_resolver.get(), nccl_communicator.get(),
worker_cache, default_worker_name);
return std::make_unique<RpcCollectiveExecutorMgr>(
config, device_mgr, std::move(dev_resolver), std::move(param_resolver),
std::move(nccl_communicator), worker_cache, default_worker_name);
}
} | #include "tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h"
#include <stdlib.h>
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/nccl/collective_communicator.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
#define NUM_DEVS 3
class RpcCollectiveExecutorMgrTest : public ::testing::Test {
protected:
RpcCollectiveExecutorMgrTest() {
string task_name = "/job:localhost/replica:0/task:0";
SessionOptions options;
options.config.mutable_experimental()->set_collective_group_leader(
task_name);
WorkerCacheInterface* worker_cache = nullptr;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", NUM_DEVS});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, task_name, &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
std::unique_ptr<DeviceResolverDistributed> dr(
new DeviceResolverDistributed(device_mgr_.get()));
std::unique_ptr<CollectiveParamResolverDistributed> cpr(
new CollectiveParamResolverDistributed(
options.config, device_mgr_.get(), dr.get(),
nullptr, worker_cache, task_name));
cme_.reset(new RpcCollectiveExecutorMgr(
options.config, device_mgr_.get(), std::move(dr), std::move(cpr),
MaybeCreateNcclCommunicator(options.config), worker_cache, task_name));
}
std::unique_ptr<RpcCollectiveExecutorMgr> cme_;
std::unique_ptr<DeviceMgr> device_mgr_;
};
TEST_F(RpcCollectiveExecutorMgrTest, FindOrCreate) {
CollectiveExecutor::Handle* h =
new CollectiveExecutor::Handle(cme_->FindOrCreate(1), true);
EXPECT_TRUE(h->get());
CollectiveExecutor::Handle* h2 =
new CollectiveExecutor::Handle(cme_->FindOrCreate(1), true);
EXPECT_EQ(h->get(), h2->get());
CollectiveExecutor* ce = h->get();
delete h;
delete h2;
CollectiveExecutor* ce2 = cme_->FindOrCreate(1);
EXPECT_EQ(ce, ce2);
ce2->Unref();
cme_->Cleanup(1);
}
TEST_F(RpcCollectiveExecutorMgrTest, NextStepId) {
int64_t x = cme_->NextStepId(7);
EXPECT_EQ(x, CollectiveExecutor::kInvalidId);
{
Notification note;
Status status;
cme_->RefreshStepIdSequenceAsync(7,
[this, &status, ¬e](const Status& s) {
status = s;
note.Notify();
});
note.WaitForNotification();
EXPECT_TRUE(status.ok());
}
x = cme_->NextStepId(7);
EXPECT_NE(x, CollectiveExecutor::kInvalidId);
EXPECT_EQ(x, cme_->NextStepId(7));
EXPECT_EQ(x, cme_->NextStepId(7));
cme_->RetireStepId(6, x);
EXPECT_EQ(x, cme_->NextStepId(7));
cme_->RetireStepId(7, x);
int64_t y = cme_->NextStepId(7);
EXPECT_EQ((x + 1) & (((1uLL << 56) - 1) | (1uLL << 56)), y);
{
Notification note;
Status status;
cme_->RefreshStepIdSequenceAsync(7,
[this, &status, ¬e](const Status& s) {
status = s;
note.Notify();
});
note.WaitForNotification();
EXPECT_TRUE(status.ok());
}
int64_t z = cme_->NextStepId(7);
EXPECT_NE(y, z);
EXPECT_GT(llabs(y - z), 3);
}
TEST_F(RpcCollectiveExecutorMgrTest, GetStepSequence) {
int64_t x = cme_->NextStepId(3);
EXPECT_EQ(x, CollectiveExecutor::kInvalidId);
int64_t y = cme_->NextStepId(4);
EXPECT_EQ(y, CollectiveExecutor::kInvalidId);
GetStepSequenceRequest request;
GetStepSequenceResponse response;
request.add_graph_key(3);
request.add_graph_key(4);
{
Notification note;
Status status;
cme_->GetStepSequenceAsync(&request, &response,
[this, &status, ¬e](const Status& s) {
status = s;
note.Notify();
});
note.WaitForNotification();
EXPECT_TRUE(status.ok());
}
ASSERT_EQ(2, response.step_sequence_size());
std::unordered_map<int64_t, int64_t> values;
for (const auto& ss : response.step_sequence()) {
values[ss.graph_key()] = ss.next_step_id();
}
EXPECT_NE(values[3], CollectiveExecutor::kInvalidId);
EXPECT_NE(values[4], CollectiveExecutor::kInvalidId);
response.Clear();
{
Notification note;
Status status;
cme_->GetStepSequenceAsync(&request, &response,
[this, &status, ¬e](const Status& s) {
status = s;
note.Notify();
});
note.WaitForNotification();
EXPECT_TRUE(status.ok());
}
ASSERT_EQ(2, response.step_sequence_size());
for (const auto& ss : response.step_sequence()) {
EXPECT_EQ(values[ss.graph_key()], ss.next_step_id());
}
}
} |
1,295 | cpp | tensorflow/tensorflow | master | tensorflow/core/distributed_runtime/master.cc | tensorflow/core/distributed_runtime/master_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_MASTER_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_MASTER_H_
#include <unordered_map>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/master_env.h"
#include "tensorflow/core/distributed_runtime/master_session.h"
#include "tensorflow/core/distributed_runtime/recent_request_ids.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/master.pb.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
class Master {
public:
explicit Master(MasterEnv* env, double session_gc_seconds);
virtual ~Master();
typedef std::function<void(const Status&)> MyClosure;
void CreateSession(const CreateSessionRequest* req,
CreateSessionResponse* resp, MyClosure done);
void ExtendSession(const ExtendSessionRequest* req,
ExtendSessionResponse* resp, MyClosure done);
void PartialRunSetup(const PartialRunSetupRequest* req,
PartialRunSetupResponse* resp, MyClosure done);
void RunStep(CallOptions* opts, const RunStepRequestWrapper* req,
MutableRunStepResponseWrapper* resp, MyClosure done);
void CloseSession(const CloseSessionRequest* req, CloseSessionResponse* resp,
MyClosure done);
void ListDevices(const ListDevicesRequest* req, ListDevicesResponse* resp,
MyClosure done);
void Reset(const ResetRequest* req, ResetResponse* resp, MyClosure done);
void MakeCallable(const MakeCallableRequest* req, MakeCallableResponse* resp,
MyClosure done);
void RunCallable(CallOptions* opts, const RunCallableRequest* req,
RunCallableResponse* resp, MyClosure done);
void ReleaseCallable(const ReleaseCallableRequest* req,
ReleaseCallableResponse* resp, MyClosure done);
private:
typedef Master ME;
MasterEnv* env_ = nullptr;
mutex mu_;
condition_variable shutdown_cv_;
bool shutdown_ TF_GUARDED_BY(mu_) = false;
Thread* gc_thread_;
std::unordered_map<string, MasterSession*> sessions_ TF_GUARDED_BY(mu_);
MovingAverage last_1000_steps_ TF_GUARDED_BY(mu_);
int64_t step_count_ TF_GUARDED_BY(mu_);
const double session_gc_seconds_;
RecentRequestIds recent_request_ids_;
void CleanupWorkers(const ResetRequest& reset);
void GC();
MasterSession* FindMasterSession(const string& handle);
Master(const Master&) = delete;
void operator=(const Master&) = delete;
};
}
#endif
#include "tensorflow/core/distributed_runtime/master.h"
#include <unordered_set>
#include <vector>
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/remote_device.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/master.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/protobuf/rpc_options.pb.h"
namespace tensorflow {
namespace {
constexpr char kGrpcPrefixRegex[] = "^grpc.*://";
}
Master::Master(MasterEnv* env, double session_gc_seconds)
: env_(env),
last_1000_steps_(1000),
step_count_(0),
session_gc_seconds_(session_gc_seconds),
recent_request_ids_(10000, env_->experimental_num_shards) {
CHECK(!env->local_devices.empty());
DCHECK_GT(env_->experimental_num_shards, 0);
if (session_gc_seconds_ > 0.0) {
gc_thread_ = env_->env->StartThread(ThreadOptions(), "TF_master_GC",
[this]() { GC(); });
} else {
gc_thread_ = nullptr;
}
}
Master::~Master() {
if (gc_thread_) {
mutex_lock l(mu_);
shutdown_ = true;
shutdown_cv_.notify_all();
delete gc_thread_;
}
}
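// Background loop that garbage-collects sessions which have been idle for
// longer than session_gc_seconds_.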
void Master::GC() {
Env* env = Env::Default();
while (true) {
mutex_lock l(mu_);
const int kTimeoutMilliseconds = 10 * 1000;
WaitForMilliseconds(&l, &shutdown_cv_, kTimeoutMilliseconds);
if (shutdown_) {
break;
}
std::vector<string> handles;
const int64_t num_micros =
static_cast<int64_t>(session_gc_seconds_ * 1000000);
for (const auto& entry : sessions_) {
int64_t lat = entry.second->last_access_time_usec();
if (static_cast<int64_t>(env->NowMicros()) - lat > num_micros) {
handles.push_back(entry.first);
auto* sess = entry.second;
SchedClosure([this, sess]() {
LOG(WARNING) << "GC session " << sess->handle() << " after "
<< session_gc_seconds_ << " seconds. "
<< "Note that if you are starting multiple replicas "
<< "on a staggered delay, session_gc_seconds may need "
<< "to be raised.";
sess->GarbageCollect();
});
}
}
for (const auto& handle : handles) sessions_.erase(handle);
}
}
MasterSession* Master::FindMasterSession(const string& handle) {
MasterSession* session = nullptr;
{
mutex_lock l(mu_);
session = gtl::FindPtrOrNull(sessions_, handle);
if (session != nullptr) {
session->Ref();
}
}
return session;
}
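// Discovers the devices available on remote workers, restricted by the
// session's device filters when any are supplied.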
class DeviceFinder {
public:
static Status GetRemoteDevices(
const protobuf::RepeatedPtrField<string>& device_filters, MasterEnv* env,
WorkerCacheInterface* worker_cache,
std::vector<std::unique_ptr<Device>>* out_remote) {
DeviceFinder finder(device_filters, env, worker_cache);
finder.Start();
TF_RETURN_IF_ERROR(finder.Wait());
finder.GetRemoteDevices(env->local_devices, out_remote);
return absl::OkStatus();
}
static void GetRemoteWorkers(
const protobuf::RepeatedPtrField<string>& device_filters, MasterEnv* env,
WorkerCacheInterface* worker_cache, std::vector<string>* workers) {
DeviceFinder finder(device_filters, env, worker_cache);
*workers = finder.targets_;
}
private:
explicit DeviceFinder(
const protobuf::RepeatedPtrField<string>& device_filters, MasterEnv* env,
WorkerCacheInterface* worker_cache)
: env_(env), worker_cache_(worker_cache) {
CHECK(worker_cache) << "Worker cache was null!";
auto process_filter = [this](const string& filter) {
DeviceNameUtils::ParsedName parsed;
if (DeviceNameUtils::ParseFullName(filter, &parsed)) {
filters_.push_back(parsed);
} else {
LOG(FATAL) << "Skipping invalid filter: " << filter;
}
};
for (const string& filter : device_filters) {
process_filter(filter);
}
if (filters_.empty()) {
std::vector<string> workers;
worker_cache->ListWorkers(&workers);
std::swap(workers, targets_);
} else {
CHECK_GT(env_->local_devices.size(), 0) << "No local devices provided.";
const string& local_device_name = env_->local_devices[0]->name();
DeviceNameUtils::ParsedName local_parsed_name;
CHECK(DeviceNameUtils::ParseFullName(local_device_name,
&local_parsed_name));
bool all_filters_have_job = true;
std::unordered_set<string> filter_job_names({local_parsed_name.job});
for (const DeviceNameUtils::ParsedName& filter : filters_) {
all_filters_have_job = all_filters_have_job && filter.has_job;
if (filter.has_job) {
filter_job_names.insert(filter.job);
}
}
std::vector<string> workers;
if (all_filters_have_job) {
for (const string& job_name : filter_job_names) {
VLOG(2) << "Selectively listing workers in job: " << job_name;
std::vector<string> workers_in_job;
worker_cache->ListWorkersInJob(job_name, &workers_in_job);
workers.insert(workers.end(), workers_in_job.begin(),
workers_in_job.end());
}
} else {
VLOG(2) << "Listing workers in all jobs because some device "
<< "filter has no job specified. Filters were:";
if (device_filters.empty()) {
VLOG(2) << "- <NO FILTERS>";
} else {
for (const string& filter : device_filters) {
VLOG(2) << "- " << filter;
}
}
worker_cache->ListWorkers(&workers);
}
for (const string& name : workers) {
if (MatchFilters(name) ||
DeviceNameUtils::IsSameAddressSpace(name, local_device_name)) {
targets_.push_back(name);
}
}
}
seen_targets_.assign(targets_.size(), false);
}
~DeviceFinder() {
for (Device* dev : found_) delete dev;
}
void Start() {
LOG(INFO) << "Scanning workers for devices: " << targets_.size()
<< " total workers";
{
mutex_lock l(mu_);
num_pending_ = targets_.size();
if (num_pending_ == 0) {
pending_zero_.notify_all();
}
}
for (size_t i = 0; i < targets_.size(); ++i) {
NewRemoteDevices(
env_->env, worker_cache_, targets_[i],
[this, i](const Status& s, std::vector<Device*>* devices) {
WhenFound(i, s, devices);
});
}
}
const int32 kLoggingPeriodMs = 10 * 1000;
Status Wait() {
mutex_lock l(mu_);
while (num_pending_ != 0) {
pending_zero_.wait_for(l, std::chrono::milliseconds(kLoggingPeriodMs));
if (num_pending_ != 0) {
for (size_t i = 0; i < targets_.size(); ++i) {
if (!seen_targets_[i]) {
LOG(INFO)
<< "CreateSession still waiting for response from worker: "
<< targets_[i];
}
}
}
}
return status_;
}
void GetRemoteDevices(const std::vector<Device*>& local,
std::vector<std::unique_ptr<Device>>* remote) {
std::unordered_set<string> names(local.size());
for (Device* dev : local) names.insert(dev->name());
mutex_lock l(mu_);
for (Device* dev : found_) {
const string& name = dev->name();
if (names.insert(name).second && MatchFilters(name)) {
remote->push_back(std::unique_ptr<Device>(dev));
} else {
delete dev;
}
}
found_.clear();
}
typedef DeviceFinder ME;
const MasterEnv* env_;
WorkerCacheInterface* worker_cache_;
std::vector<DeviceNameUtils::ParsedName> filters_;
mutex mu_;
int num_pending_ TF_GUARDED_BY(mu_);
condition_variable pending_zero_;
std::vector<Device*> found_ TF_GUARDED_BY(mu_);
std::vector<string> targets_;
std::vector<bool> seen_targets_ TF_GUARDED_BY(mu_);
Status status_;
void WhenFound(int target_index, const Status& s,
std::vector<Device*>* devices) {
mutex_lock l(mu_);
seen_targets_[target_index] = true;
if (!s.ok()) {
LOG(ERROR) << "CreateSession failed because worker "
<< targets_[target_index] << " returned error: " << s;
status_.Update(s);
} else {
found_.insert(found_.end(), devices->begin(), devices->end());
devices->clear();
}
--num_pending_;
if (num_pending_ == 0) {
pending_zero_.notify_all();
}
}
bool Intersects(const DeviceNameUtils::ParsedName& x,
const DeviceNameUtils::ParsedName& y) {
return (!x.has_job || !y.has_job || x.job == y.job) &&
(!x.has_replica || !y.has_replica || x.replica == y.replica) &&
(!x.has_task || !y.has_task || x.task == y.task) &&
(!x.has_type || !y.has_type || x.type == y.type) &&
(!x.has_id || !y.has_id || x.id == y.id);
}
bool MatchFilters(const string& name) {
if (filters_.empty()) return true;
DeviceNameUtils::ParsedName x;
if (DeviceNameUtils::ParseFullName(name, &x)) {
for (const auto& filter : filters_) {
if (Intersects(x, filter)) return true;
}
}
return false;
}
DeviceFinder(const DeviceFinder&) = delete;
void operator=(const DeviceFinder&) = delete;
};
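// Creates a MasterSession. If the request carries a ClusterDef, a dedicated
// worker cache is built for that cluster; otherwise the master's default
// worker cache and local devices are used.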
void Master::CreateSession(const CreateSessionRequest* req,
CreateSessionResponse* resp, MyClosure done) {
SchedClosure([this, req, resp, done]() {
Status status;
WorkerCacheFactoryOptions worker_cache_factory_options;
auto call_done = gtl::MakeCleanup([&status, &done] { done(status); });
status = ValidateExternalGraphDefSyntax(req->graph_def());
if (!status.ok()) return;
WorkerCacheInterface* worker_cache = nullptr;
std::unique_ptr<WorkerCacheInterface> worker_cache_ptr;
std::unique_ptr<DeviceSet> device_set;
std::unique_ptr<std::vector<std::unique_ptr<Device>>> remote_devices(
new std::vector<std::unique_ptr<Device>>());
const ClusterDef& cluster_def = req->config().cluster_def();
if (!cluster_def.job().empty()) {
worker_cache_factory_options.cluster_def = cluster_def;
string normalized_string(req->target());
RE2::Replace(&normalized_string, kGrpcPrefixRegex, "");
for (auto&& job : cluster_def.job()) {
for (auto&& task : job.tasks()) {
if (task.second == normalized_string) {
if (!worker_cache_factory_options.job_name.empty()) {
status = errors::InvalidArgument(
"Found multiple matching tasks that correspond to "
"to the master. Master target: '",
req->target(),
"'. ClusterDef: ", cluster_def.ShortDebugString());
LOG(ERROR) << status;
return;
}
if (env_->local_devices[0]->parsed_name().job == job.name() &&
env_->local_devices[0]->parsed_name().task == task.first) {
status = errors::InvalidArgument(
"The ClusterSpec names the job and task index to be the same "
"names that were provided when the server booted. This is "
"currently not allowed. Job: ",
job.name(), ", task index: ", task.first);
return;
}
worker_cache_factory_options.job_name = job.name();
worker_cache_factory_options.task_index = task.first;
}
}
}
worker_cache_factory_options.rpc_options = req->config().rpc_options();
status = env_->worker_cache_factory(worker_cache_factory_options,
&worker_cache);
if (!status.ok()) return;
worker_cache_ptr = std::unique_ptr<WorkerCacheInterface>(worker_cache);
status =
DeviceFinder::GetRemoteDevices(req->config().device_filters(), env_,
worker_cache, remote_devices.get());
if (!status.ok()) return;
device_set.reset(new DeviceSet);
for (auto&& d : *remote_devices) {
device_set->AddDevice(d.get());
DeviceNameUtils::ParsedName name = d->parsed_name();
if (name.job == worker_cache_factory_options.job_name &&
name.task == worker_cache_factory_options.task_index &&
name.type == "CPU" && name.id == 0) {
device_set->set_client_device(d.get());
}
}
} else {
worker_cache = env_->worker_cache;
status =
DeviceFinder::GetRemoteDevices(req->config().device_filters(), env_,
worker_cache, remote_devices.get());
if (!status.ok()) return;
device_set = std::make_unique<DeviceSet>();
for (auto&& d : *remote_devices) {
device_set->AddDevice(d.get());
}
int num_local_devices = 0;
for (Device* d : env_->local_devices) {
device_set->AddDevice(d);
if (num_local_devices == 0) {
device_set->set_client_device(d);
}
num_local_devices++;
}
}
CHECK(device_set->client_device()) << "No client device found. Missing "
<< "CPU:0 device?";
SessionOptions options;
options.target = req->target();
options.config = req->config();
options.config.mutable_experimental()
->set_disable_optimize_for_static_graph(true);
std::vector<string> filtered_worker_list;
DeviceFinder::GetRemoteWorkers(req->config().device_filters(), env_,
worker_cache, &filtered_worker_list);
MasterSession* session = env_->master_session_factory(
options, env_, std::move(remote_devices), std::move(worker_cache_ptr),
std::move(device_set), std::move(filtered_worker_list));
GraphDef* gdef =
const_cast<CreateSessionRequest*>(req)->mutable_graph_def();
status = session->Create(std::move(*gdef), cluster_def);
if (!status.ok()) {
session->Close().IgnoreError();
session->Unref();
return;
}
resp->set_session_handle(session->handle());
{
mutex_lock l(mu_);
CHECK(sessions_.insert({session->handle(), session}).second);
}
});
}
void Master::ExtendSession(const ExtendSessionRequest* req,
ExtendSessionResponse* resp, MyClosure done) {
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([session, req, resp, done]() {
Status status = ValidateExternalGraphDefSyntax(req->graph_def());
if (status.ok()) {
status = session->Extend(req, resp);
}
session->Unref();
done(status);
});
}
void Master::PartialRunSetup(const PartialRunSetupRequest* req,
PartialRunSetupResponse* resp, MyClosure done) {
Status s = recent_request_ids_.TrackUnique(req->request_id(),
"PartialRunSetup (Master)", *req);
if (!s.ok()) {
done(s);
return;
}
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([session, req, resp, done]() {
Status s = session->PartialRunSetup(req, resp);
session->Unref();
done(s);
});
}
void Master::RunStep(CallOptions* opts, const RunStepRequestWrapper* req,
MutableRunStepResponseWrapper* resp, MyClosure done) {
Status s = recent_request_ids_.TrackUnique(req->request_id(),
"RunStep (Master)", req);
if (!s.ok()) {
done(s);
return;
}
auto start_time = env_->env->NowMicros();
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([this, start_time, session, opts, req, resp, done]() {
Status status = session->Run(opts, *req, resp);
session->Unref();
uint64 done_time = env_->env->NowMicros();
done(status);
mutex_lock l(mu_);
last_1000_steps_.AddValue((done_time - start_time) / 1e9);
++step_count_;
});
}
void Master::CloseSession(const CloseSessionRequest* req,
CloseSessionResponse* resp, MyClosure done) {
MasterSession* session = nullptr;
{
mu_.lock();
auto iter = sessions_.find(req->session_handle());
if (iter == sessions_.end()) {
mu_.unlock();
done(errors::Aborted(
"Session ", req->session_handle(),
" is not found. Possibly, this master has restarted."));
return;
}
session = iter->second;
sessions_.erase(iter);
mu_.unlock();
}
SchedClosure([session, done]() {
Status s = session->Close();
session->Unref();
done(s);
});
}
void Master::ListDevices(const ListDevicesRequest* req,
ListDevicesResponse* resp, MyClosure done) {
SchedClosure([this, req, resp, done]() {
if (!req->session_handle().empty()) {
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::InvalidArgument(
"Session ", req->session_handle(),
" is not found. Possibly, this master has restarted."));
return;
}
core::ScopedUnref ref(session);
Status s = session->ListDevices(resp);
done(s);
return;
}
std::vector<std::unique_ptr<Device>> remote_devices;
Status s = DeviceFinder::GetRemoteDevices({}, env_, env_->worker_cache,
&remote_devices);
if (s.ok()) {
for (Device* dev : env_->local_devices) {
*(resp->add_local_device()) = dev->attributes();
}
for (auto&& dev : remote_devices) {
*(resp->add_remote_device()) = dev->attributes();
}
}
done(s);
});
}
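// Sends CleanupAll to every worker matching the reset's device filters and
// blocks until all responses arrive.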
void Master::CleanupWorkers(const ResetRequest& reset) {
std::vector<string> worker_names;
DeviceFinder::GetRemoteWorkers(reset.device_filters(), env_,
env_->worker_cache, &worker_names);
if (!worker_names.empty()) {
const int num_workers = worker_names.size();
std::vector<Notification> n(num_workers);
CleanupAllRequest req;
(*req.mutable_container()) = reset.container();
std::vector<CleanupAllResponse> resp(num_workers);
int c = 0;
for (int i = 0; i < num_workers; ++i) {
const string& worker_name = worker_names[i];
auto worker = env_->worker_cache->GetOrCreateWorker(worker_name);
if (worker) {
worker->CleanupAllAsync(
&req, &resp[i], [this, &n, worker_name, worker, c](Status s) {
if (!s.ok()) {
LOG(ERROR) << "Worker CleanupAll failed: " << s;
}
env_->worker_cache->ReleaseWorker(worker_name, worker);
n[c].Notify();
});
} else {
n[c].Notify();
}
++c;
}
for (size_t i = 0; i < n.size(); ++i) {
n[i].WaitForNotification();
}
}
}
void Master::Reset(const ResetRequest* req, ResetResponse* resp,
MyClosure done) {
std::vector<MasterSession*> sessions_to_close;
{
mutex_lock l(mu_);
for (const auto& entry : sessions_) {
sessions_to_close.push_back(entry.second);
}
sessions_.clear();
}
CleanupWorkers(*req);
SchedClosure([sessions_to_close, done]() {
Status s;
for (MasterSession* session : sessions_to_close) {
s.Update(session->Close());
session->Unref();
}
done(s);
});
}
void Master::MakeCallable(const MakeCallableRequest* req,
MakeCallableResponse* resp, MyClosure done) {
Status s = recent_request_ids_.TrackUnique(req->request_id(),
"MakeCallable (Master)", *req);
if (!s.ok()) {
done(s);
return;
}
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([session, req, resp, done = std::move(done)]() {
Status s = session->MakeCallable(*req, resp);
session->Unref();
done(s);
});
}
void Master::RunCallable(CallOptions* opts, const RunCallableRequest* req,
RunCallableResponse* resp, MyClosure done) {
Status s = recent_request_ids_.TrackUnique(req->request_id(),
"RunCallable (Master)", *req);
if (!s.ok()) {
done(s);
return;
}
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([session, opts, req, resp, done = std::move(done)]() {
Status s = session->RunCallable(opts, *req, resp);
session->Unref();
done(s);
});
}
void Master::ReleaseCallable(const ReleaseCallableRequest* req,
ReleaseCallableResponse* resp, MyClosure done) {
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([session, req, resp, done = std::move(done)]() {
Status s = session->ReleaseCallable(*req, resp);
session->Unref();
done(s);
});
}
} | #include "tensorflow/core/distributed_runtime/master.h"
#include <map>
#include <memory>
#include "grpcpp/grpcpp.h"
#include "Eigen/Core"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_master_service_impl.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_testlib.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/master.pb.h"
namespace tensorflow {
class MasterTest : public ::testing::Test {
protected:
MasterTest() {
std::vector<string> targets;
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 1;
(*options.config.mutable_device_count())["GPU"] = 0;
TF_CHECK_OK(test::TestCluster::MakeTestCluster(
test::TestClusterConfig().Options(options).Jobs(
{test::TestJob{"localhost", 2}}),
&cluster_));
SharedGrpcChannelPtr channel_ptr;
TF_CHECK_OK(NewHostPortGrpcChannel(
cluster_->targets()[0], &options.config.rpc_options(), &channel_ptr));
master_ = grpc::MasterService::NewStub(channel_ptr);
}
std::unique_ptr<test::TestCluster> cluster_;
std::unique_ptr<grpc::MasterService::Stub> master_;
Status CreateSession(const GraphDef& def, string* handle,
int64_t* initial_version) {
::grpc::ClientContext ctx;
CreateSessionRequest req;
*(req.mutable_graph_def()) = def;
req.mutable_config()->set_placement_period(1);
CreateSessionResponse resp;
const Status s = FromGrpcStatus(master_->CreateSession(&ctx, req, &resp));
if (s.ok()) {
*handle = resp.session_handle();
*initial_version = resp.graph_version();
}
return s;
}
Status ExtendSession(const string& handle, const GraphDef& def,
int64_t current_version, int64_t* new_version) {
::grpc::ClientContext ctx;
ExtendSessionRequest req;
req.set_session_handle(handle);
*(req.mutable_graph_def()) = def;
req.set_current_graph_version(current_version);
ExtendSessionResponse resp;
const Status s = FromGrpcStatus(master_->ExtendSession(&ctx, req, &resp));
if (s.ok()) {
*new_version = resp.new_graph_version();
}
return s;
}
Status RunStep(const string& handle,
const std::vector<std::pair<string, const Tensor*> >& feed,
const std::map<string, Tensor*>& fetch) {
::grpc::ClientContext ctx;
RunStepRequest req;
req.set_session_handle(handle);
for (const auto& p : feed) {
const string& feed_name = p.first;
const Tensor* feed_tensor = p.second;
auto f = req.add_feed();
f->set_name(feed_name);
feed_tensor->AsProtoTensorContent(f->mutable_tensor());
}
for (const auto& p : fetch) {
const string& fetch_name = p.first;
req.add_fetch(fetch_name);
}
RunStepResponse resp;
const Status s = FromGrpcStatus(master_->RunStep(&ctx, req, &resp));
if (s.ok()) {
for (const auto& fetch_resp : resp.tensor()) {
auto it = fetch.find(fetch_resp.name());
CHECK(it != fetch.end());
CHECK(it->second->FromProto(fetch_resp.tensor()));
}
}
return s;
}
Status CloseSession(const string& handle) {
::grpc::ClientContext ctx;
CloseSessionRequest req;
req.set_session_handle(handle);
CloseSessionResponse resp;
return FromGrpcStatus(master_->CloseSession(&ctx, req, &resp));
}
Status Reset() {
::grpc::ClientContext ctx;
ResetRequest req;
ResetResponse resp;
return FromGrpcStatus(master_->Reset(&ctx, req, &resp));
}
};
TEST_F(MasterTest, CreateClose) {
GraphDef def;
string handle;
int64_t initial_version;
TF_ASSERT_OK(CreateSession(def, &handle, &initial_version));
EXPECT_TRUE(errors::IsAborted(CloseSession("randombits")));
EXPECT_TRUE(CloseSession(handle).ok());
}
TEST_F(MasterTest, ListDevices) {
::grpc::ClientContext ctx;
ListDevicesRequest req;
ListDevicesResponse resp;
const Status s = FromGrpcStatus(master_->ListDevices(&ctx, req, &resp));
TF_EXPECT_OK(s);
EXPECT_EQ(1, resp.local_device_size());
EXPECT_EQ("CPU", resp.local_device(0).device_type());
}
TEST_F(MasterTest, Reset) {
GraphDef def;
string s1, s2;
int64_t initial_version1, initial_version2;
TF_ASSERT_OK(CreateSession(def, &s1, &initial_version1));
TF_ASSERT_OK(CreateSession(def, &s2, &initial_version2));
EXPECT_TRUE(Reset().ok());
EXPECT_TRUE(errors::IsAborted(CloseSession(s1)));
EXPECT_TRUE(errors::IsAborted(CloseSession(s2)));
}
TEST_F(MasterTest, Extend) {
GraphDef def_0;
string handle;
int64_t initial_version;
TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));
Tensor A_expected(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&A_expected, {3.0, 2.0, -1.0, 0.0});
Tensor x_expected(DT_FLOAT, TensorShape({2, 1}));
test::FillValues<float>(&x_expected, {2.0, 2.0});
Graph graph_1(OpRegistry::Global());
test::graph::Constant(&graph_1, A_expected, "A");
GraphDef def_1;
test::graph::ToGraphDef(&graph_1, &def_1);
int64_t version_1;
TF_ASSERT_OK(ExtendSession(handle, def_1, initial_version, &version_1));
EXPECT_GT(version_1, initial_version);
Tensor A(DT_FLOAT, TensorShape({2, 2}));
TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}}));
test::ExpectTensorEqual<float>(A, A_expected);
Graph graph_2(OpRegistry::Global());
test::graph::Constant(&graph_2, x_expected, "x");
GraphDef def_2;
test::graph::ToGraphDef(&graph_2, &def_2);
int64_t version_2;
EXPECT_TRUE(errors::IsAborted(
ExtendSession("randombits", def_2, version_1, &version_2)));
TF_ASSERT_OK(ExtendSession(handle, def_2, version_1, &version_2));
EXPECT_GT(version_2, version_1);
Tensor x(DT_FLOAT, TensorShape({2, 1}));
TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}, {"x:0", &x}}));
test::ExpectTensorEqual<float>(A, A_expected);
test::ExpectTensorEqual<float>(x, x_expected);
TF_ASSERT_OK(CloseSession(handle));
}
TEST_F(MasterTest, ExtendUpdateStatefulFails) {
GraphDef def_0;
string handle;
int64_t initial_version;
TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));
Graph graph_1(OpRegistry::Global());
test::graph::Var(&graph_1, DT_FLOAT, TensorShape({512}));
GraphDef def_1;
test::graph::ToGraphDef(&graph_1, &def_1);
int64_t version_1, version_2;
TF_ASSERT_OK(ExtendSession(handle, def_1, initial_version, &version_1));
EXPECT_GT(version_1, initial_version);
EXPECT_TRUE(errors::IsInvalidArgument(
ExtendSession(handle, def_1, version_1, &version_2)));
TF_ASSERT_OK(CloseSession(handle));
}
TEST_F(MasterTest, ExtendTwiceFails) {
GraphDef def_0;
string handle;
int64_t initial_version;
TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));
Graph graph_1(OpRegistry::Global());
test::graph::Var(&graph_1, DT_FLOAT, TensorShape({512}));
GraphDef def_1;
test::graph::ToGraphDef(&graph_1, &def_1);
int64_t version_1;
TF_ASSERT_OK(ExtendSession(handle, def_1, initial_version, &version_1));
EXPECT_GT(version_1, initial_version);
EXPECT_TRUE(errors::IsAborted(
ExtendSession(handle, def_1, initial_version, &version_1)));
TF_ASSERT_OK(CloseSession(handle));
}
TEST_F(MasterTest, ConcurrentExtendOnlyOneSucceeds) {
GraphDef def_0;
string handle;
int64_t initial_version;
TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));
Graph graph_1(OpRegistry::Global());
test::graph::Var(&graph_1, DT_FLOAT, TensorShape({512}));
GraphDef def_1;
test::graph::ToGraphDef(&graph_1, &def_1);
Notification n;
mutex mu;
int succeeded = 0;
int failed = 0;
auto extend_fn = [this, handle, def_1, initial_version, &n, &mu, &succeeded,
&failed]() {
n.WaitForNotification();
int64_t new_version;
Status s = ExtendSession(handle, def_1, initial_version, &new_version);
EXPECT_TRUE(s.ok() || errors::IsAborted(s));
{
mutex_lock l(mu);
if (s.ok()) {
++succeeded;
} else {
++failed;
}
}
};
{
thread::ThreadPool thread_pool(Env::Default(), "extend_pool", 100);
for (int i = 0; i < 100; ++i) {
thread_pool.Schedule(extend_fn);
}
n.Notify();
}
EXPECT_EQ(failed, 99);
EXPECT_EQ(succeeded, 1);
TF_ASSERT_OK(CloseSession(handle));
}
TEST_F(MasterTest, ConcurrentExtendAndRun) {
Graph graph_0(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a_tensor, {3, 2, -1, 0});
test::graph::Constant(&graph_0, a_tensor, "A");
GraphDef def_0;
test::graph::ToGraphDef(&graph_0, &def_0);
string handle;
int64_t initial_version;
TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));
Graph graph_1(OpRegistry::Global());
Tensor b_tensor(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&b_tensor, {1, 0, 0, 1});
test::graph::Constant(&graph_1, b_tensor, "B");
GraphDef def_1;
test::graph::ToGraphDef(&graph_1, &def_1);
Notification extend_done;
Notification extend_can_start;
auto get_a_fn = [this, handle, &extend_done]() {
Tensor A(DT_FLOAT, TensorShape({2, 2}));
while (!extend_done.HasBeenNotified()) {
TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}}));
}
TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}}));
};
auto get_a_and_b_fn = [this, handle, &extend_done, &extend_can_start]() {
Tensor A(DT_FLOAT, TensorShape({2, 2}));
Tensor B(DT_FLOAT, TensorShape({2, 2}));
EXPECT_TRUE(
errors::IsNotFound(RunStep(handle, {}, {{"A:0", &A}, {"B:0", &B}})));
extend_can_start.Notify();
while (!extend_done.HasBeenNotified()) {
Status s = RunStep(handle, {}, {{"A:0", &A}, {"B:0", &B}});
EXPECT_TRUE(errors::IsNotFound(s) || s.ok());
}
TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}, {"B:0", &B}}));
};
auto extend_fn = [this, handle, def_1, initial_version, &extend_done,
&extend_can_start]() {
extend_can_start.WaitForNotification();
int64_t version_1;
TF_ASSERT_OK(ExtendSession(handle, def_1, initial_version, &version_1));
extend_done.Notify();
};
{
thread::ThreadPool thread_pool(Env::Default(), "extend_pool", 3);
thread_pool.Schedule(get_a_fn);
thread_pool.Schedule(get_a_and_b_fn);
thread_pool.Schedule(extend_fn);
}
TF_ASSERT_OK(CloseSession(handle));
}
TEST_F(MasterTest, EigenProblem) {
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a_tensor, {3, 2, -1, 0});
Node* a_node = test::graph::Constant(&graph, a_tensor);
Tensor x_tensor(DT_FLOAT, TensorShape({2, 1}));
test::FillValues<float>(&x_tensor, {0, 0});
Node* x_node = test::graph::Constant(&graph, x_tensor);
Node* y_node = test::graph::Matmul(&graph, a_node, x_node, false, false);
GraphDef def;
test::graph::ToGraphDef(&graph, &def);
string handle;
int64_t initial_version;
TF_CHECK_OK(CreateSession(def, &handle, &initial_version));
const Eigen::array<Eigen::DenseIndex, 1> sum_along_dim{0};
const Eigen::array<Eigen::DenseIndex, 2> matrix_transpose{1, 0};
Tensor x(DT_FLOAT, TensorShape({2, 1}));
Tensor y(DT_FLOAT, TensorShape({2, 1}));
Eigen::Tensor<float, 1, Eigen::RowMajor> y_square_sum;
Eigen::Tensor<float, 2, Eigen::RowMajor> y_normalized(2, 1);
y_normalized.setRandom();
Eigen::Tensor<float, 1, Eigen::RowMajor> error_square_sum;
float lambda;
bool converged = false;
while (!converged) {
auto x_matrix = x.matrix<float>();
x_matrix = y_normalized;
TF_EXPECT_OK(
RunStep(handle, {{x_node->name(), &x}}, {{y_node->name() + ":0", &y}}));
auto y_matrix = y.matrix<float>();
{
lambda = y_matrix(0, 0) / x_matrix(0, 0);
y_square_sum = y.matrix<float>().square().sum(sum_along_dim);
const float norm = static_cast<float>(sqrt(y_square_sum(0)));
y_normalized = y_matrix * (1 / norm);
error_square_sum = (x_matrix - y_normalized).square().sum(sum_along_dim);
VLOG(1) << "x = [" << x_matrix.shuffle(matrix_transpose) << "] y = ["
<< y_matrix.shuffle(matrix_transpose) << "] lambda = " << lambda;
converged = sqrt(error_square_sum(0)) < 1e-10;
}
}
EXPECT_NEAR(lambda, 2.0, 0.01);
TF_EXPECT_OK(CloseSession(handle));
}
} |
1,296 | cpp | tensorflow/tensorflow | collective_param_resolver_distributed | tensorflow/core/distributed_runtime/collective_param_resolver_distributed.cc | tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_COLLECTIVE_PARAM_RESOLVER_DISTRIBUTED_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_COLLECTIVE_PARAM_RESOLVER_DISTRIBUTED_H_
#include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
class ConfigProto;
class WorkerCacheInterface;
class DeviceResolverDistributed;
class DeviceMgr;
class CollectiveParamResolverDistributed : public CollectiveParamResolverLocal {
public:
CollectiveParamResolverDistributed(
const ConfigProto& config, const DeviceMgr* dev_mgr,
DeviceResolverDistributed* dev_resolver,
NcclCommunicatorInterface* nccl_communicator,
WorkerCacheInterface* worker_cache, const string& task_name);
void CompleteParamsAsync(const DeviceAttributes& device, CollectiveParams* cp,
CancellationManager* cancel_mgr,
const StatusCallback& done) override;
void CompleteGroupAsync(const DeviceAttributes& device,
CollGroupParams* group_params,
CancellationManager* cancel_mgr,
const StatusCallback& done) override;
void CompleteInstanceAsync(const CompleteInstanceRequest* request,
CompleteInstanceResponse* response,
CancellationManager* cancel_mgr,
const StatusCallback& done) override;
void StartAbort(const Status& s) override;
protected:
GroupRec* GetCachedGroup(int32_t group_key) TF_LOCKS_EXCLUDED(group_mu_);
Status UpdateGroupCache(const CompleteGroupResponse& resp)
TF_LOCKS_EXCLUDED(group_mu_);
void CompleteGroupDistributed(const DeviceAttributes& device,
CollGroupParams* group_params,
CancellationManager* cancel_mgr,
const StatusCallback& done);
bool InstanceIsCached(int32_t group_key, const CollInstanceParams& instance)
TF_LOCKS_EXCLUDED(instance_mu_);
Status UpdateInstanceCache(CollectiveParams* cp,
const CompleteInstanceResponse& resp)
TF_LOCKS_EXCLUDED(instance_mu_, group_mu_);
void CompleteInstanceDistributed(const string& device, CollectiveParams* cp,
CancellationManager* cancel_mgr,
const StatusCallback& done)
TF_LOCKS_EXCLUDED(instance_mu_, group_mu_);
WorkerCacheInterface* worker_cache_;
const string group_leader_;
CancellationManager abortion_cancel_mgr_;
};
}
#endif
#include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "absl/strings/escaping.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/distributed_runtime/cancellable_call.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
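// Cancellable RPC wrappers that forward group and instance resolution
// requests to the group leader task.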
class CompleteGroupCall : public CancellableCall {
public:
CompleteGroupCall(const CollGroupParams& group,
const DeviceAttributes& device,
CancellationManager* cancel_mgr,
const string& remote_worker, WorkerCacheInterface* wc)
: CancellableCall(cancel_mgr, remote_worker, wc) {
req_.set_group_key(group.group_key);
req_.set_group_size(group.group_size);
req_.set_device_type(group.device_type.type_string());
*req_.mutable_device_attributes() = device;
}
~CompleteGroupCall() override {}
void IssueCall(const StatusCallback& done) override {
wi_->CompleteGroupAsync(&opts_, &req_, &resp_, done);
}
CompleteGroupRequest req_;
CompleteGroupResponse resp_;
};
class CompleteInstanceCall : public CancellableCall {
public:
CompleteInstanceCall(const CollGroupParams& group,
const CollInstanceParams& instance,
const string& node_name, const string& device_name,
bool is_source, CancellationManager* cancel_mgr,
const string& remote_worker, WorkerCacheInterface* wc)
: CancellableCall(cancel_mgr, remote_worker, wc) {
req_.set_name(node_name);
req_.set_type(instance.type);
req_.set_step_id(instance.step_id);
req_.set_data_type(instance.data_type);
instance.shape.AsProto(req_.mutable_shape());
req_.set_group_key(group.group_key);
req_.set_group_size(group.group_size);
req_.set_instance_key(instance.instance_key);
req_.set_device_type(group.device_type.type_string());
for (int32_t offset : instance.impl_details.subdiv_offsets) {
req_.add_subdiv_offset(offset);
}
req_.set_device(device_name);
req_.set_is_source(is_source);
}
~CompleteInstanceCall() override {}
void IssueCall(const StatusCallback& done) override {
wi_->CompleteInstanceAsync(&opts_, &req_, &resp_, done);
}
CompleteInstanceRequest req_;
CompleteInstanceResponse resp_;
};
}
CollectiveParamResolverDistributed::CollectiveParamResolverDistributed(
const ConfigProto& config, const DeviceMgr* dev_mgr,
DeviceResolverDistributed* dev_resolver,
NcclCommunicatorInterface* nccl_communicator,
WorkerCacheInterface* worker_cache, const string& task_name)
: CollectiveParamResolverLocal(config, dev_mgr, dev_resolver,
nccl_communicator, task_name),
worker_cache_(worker_cache),
group_leader_(task_name == config.experimental().collective_group_leader()
? ""
: config.experimental().collective_group_leader()) {
VLOG(1) << "CompleteParamResolverDistributed ctor task={" << task_name
<< "} config.collective_group_leader={"
<< config.experimental().collective_group_leader() << "}"
<< " config.collective_nccl={"
<< config.experimental().collective_nccl() << "}";
}
void CollectiveParamResolverDistributed::CompleteParamsAsync(
const DeviceAttributes& device, CollectiveParams* cp,
CancellationManager* cancel_mgr, const StatusCallback& done) {
VLOG(1) << "CompleteParams distributed " << device.name() << " for " << cp
<< ": " << cp->ToString();
if (cp->run_group_initialization) {
CompleteGroupDistributed(
device, &cp->group, cancel_mgr,
[this, device, cp, cancel_mgr, done](Status s) {
if (s.ok()) {
std::vector<DeviceAttributes> devices;
devices.reserve(cp->group.group_size);
for (const CollGroupMember& m : cp->group.members) {
devices.push_back(m.device);
}
s = dev_resolver_->UpdateDeviceAttributes(devices);
}
if (s.ok()) {
CompleteInstanceDistributed(device.name(), cp, cancel_mgr, done);
} else {
done(s);
}
});
} else {
auto s = LookupGroup(cp->group.group_key, &cp->group);
if (s.ok()) {
CompleteInstanceDistributed(device.name(), cp, cancel_mgr, done);
} else {
done(s);
}
}
}
void CollectiveParamResolverDistributed::CompleteGroupAsync(
const DeviceAttributes& device, CollGroupParams* group_params,
CancellationManager* cancel_mgr, const StatusCallback& done) {
CompleteGroupDistributed(device, group_params, cancel_mgr, done);
}
void CollectiveParamResolverDistributed::CompleteInstanceAsync(
const CompleteInstanceRequest* request, CompleteInstanceResponse* response,
CancellationManager* cancel_mgr, const StatusCallback& done) {
GroupRec* gr = GetCachedGroup(request->group_key());
if (gr == nullptr) {
done(errors::FailedPrecondition(
"group ", request->group_key(),
" not found. This normally means the server has restarted"));
return;
}
CollectiveParams* cp = new CollectiveParams;
{
mutex_lock l(gr->mu);
if (!gr->status.ok()) {
done(gr->status);
return;
} else if (gr->group.members.size() != gr->group.group_size) {
done(errors::FailedPrecondition(
"group ", request->group_key(),
" failed to resolve. This normally means the server has restarted"));
return;
}
cp->group = gr->group;
}
cp->name = request->name();
cp->instance.type = CollectiveType(request->type());
cp->instance.instance_key = request->instance_key();
cp->instance.step_id = request->step_id();
cp->instance.data_type = request->data_type();
cp->instance.shape = TensorShape(request->shape());
cp->is_source = request->is_source();
for (int32_t offset : request->subdiv_offset()) {
cp->instance.impl_details.subdiv_offsets.push_back(offset);
}
StatusCallback done_and_cleanup = [cp, done](const Status& s) {
done(s);
cp->Unref();
};
CompleteInstanceDistributed(
request->device(), cp, cancel_mgr,
[this, cp, response, done_and_cleanup](Status status) {
if (status.ok()) {
bool created_irec;
InstanceRec* ir = GetOrCreateInstanceRec(cp, &created_irec);
{
mutex_lock l(ir->mu);
status = ir->status;
if (ir->status.ok()) {
response->set_instance_key(cp->instance.instance_key);
response->set_source_rank(ir->source_rank);
}
}
}
done_and_cleanup(status);
});
}
CollectiveParamResolverDistributed::GroupRec*
CollectiveParamResolverDistributed::GetCachedGroup(int32_t group_key) {
mutex_lock l(group_mu_);
auto it = group_table_.find(group_key);
if (it == group_table_.end()) {
return nullptr;
}
return it->second.get();
}
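// Installs the group membership received from the leader into the local
// group table, checking consistency with any previously cached entry.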
Status CollectiveParamResolverDistributed::UpdateGroupCache(
const CompleteGroupResponse& resp) {
std::unique_ptr<GroupRec> gr(new GroupRec);
{
mutex_lock grl(gr->mu);
gr->group.device_type = DeviceType(resp.device_type());
gr->group.group_key = resp.group_key();
gr->group.group_size = resp.group_size();
gr->group.num_tasks = resp.num_tasks();
if (resp.device_attributes().empty()) {
return errors::Internal(
"CompleteGroupResponse device_attributes is empty. Make sure you're "
"running the same version of Tensorflow on all workers.");
}
if (resp.device_attributes_size() != gr->group.group_size) {
return errors::Internal(
"CompleteGroupResponse group_size doesn't match device_name list");
}
gr->group.members.reserve(resp.device_attributes().size());
for (const DeviceAttributes& device : resp.device_attributes()) {
CollGroupMember member;
member.device = device;
gr->group.members.push_back(std::move(member));
gr->incarnations_by_device_name[device.name()] = device.incarnation();
}
gr->group.runtime_details.communicator_key = resp.communicator_key();
FinishGroup(gr.get());
}
GroupRec* previous_gr = nullptr;
{
mutex_lock l(group_mu_);
auto it = group_table_.find(resp.group_key());
if (it == group_table_.end()) {
VLOG(2) << "UpdateGroupCache: communicator_key="
<< absl::CEscape(resp.communicator_key());
group_table_[gr->group.group_key] = std::move(gr);
} else {
previous_gr = it->second.get();
}
}
if (previous_gr != nullptr) {
mutex_lock grl(previous_gr->mu);
if (previous_gr->group.runtime_details.communicator_key !=
resp.communicator_key()) {
return errors::Internal(
"UpdateGroupCache: CompleteGroupResponse for group ",
resp.group_key(),
" gives communicator_key=", absl::CEscape(resp.communicator_key()),
" but cache already holds communicator_key=",
absl::CEscape(previous_gr->group.runtime_details.communicator_key));
}
}
return absl::OkStatus();
}
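// Resolves group membership locally when this task is the leader; otherwise
// issues a CompleteGroup RPC to the leader (unless the group is already
// cached) and then completes locally from the cached result.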
void CollectiveParamResolverDistributed::CompleteGroupDistributed(
const DeviceAttributes& device, CollGroupParams* group_params,
CancellationManager* cancel_mgr, const StatusCallback& done) {
VLOG(1) << "CompleteGroupDistributed group_key=" << group_params->group_key
<< " dev: " << device.name()
<< " is_leader=" << (group_leader_.empty());
if (group_leader_.empty()) {
return CompleteGroupLocal(device, group_params, cancel_mgr, done);
} else if (GetCachedGroup(group_params->group_key) == nullptr) {
CompleteGroupCall* call = new CompleteGroupCall(
*group_params, device, cancel_mgr, group_leader_, worker_cache_);
CancellationToken abortion_token =
abortion_cancel_mgr_.get_cancellation_token();
bool already_aborted = !abortion_cancel_mgr_.RegisterCallback(
abortion_token, [call] { call->Cancel(); });
if (already_aborted) {
done(errors::Cancelled("collective ops already aborted"));
delete call;
return;
}
call->Start([this, device, group_params, call, cancel_mgr, abortion_token,
done](const Status& s) {
abortion_cancel_mgr_.DeregisterCallback(abortion_token);
if (s.ok()) {
Status status = UpdateGroupCache(call->resp_);
if (status.ok()) {
CompleteGroupLocal(device, group_params, cancel_mgr, done);
} else {
done(status);
}
} else {
done(s);
}
delete call;
});
return;
} else {
return CompleteGroupLocal(device, group_params, cancel_mgr, done);
}
}
bool CollectiveParamResolverDistributed::InstanceIsCached(
int32_t group_key, const CollInstanceParams& instance) {
mutex_lock l(instance_mu_);
auto group_it = instance_table_.find(group_key);
if (group_it == instance_table_.end()) {
return false;
}
auto instance_it =
group_it->second.find({instance.step_id, instance.instance_key});
return instance_it != group_it->second.end();
}
Status CollectiveParamResolverDistributed::UpdateInstanceCache(
CollectiveParams* cp, const CompleteInstanceResponse& resp) {
int32_t source_rank = resp.source_rank();
bool created_irec;
InstanceRec* ir = GetOrCreateInstanceRec(cp, &created_irec);
mutex_lock l(ir->mu);
if (!ir->status.ok()) {
return ir->status;
}
if (ir->source_rank != source_rank) {
if (ir->source_rank >= 0) {
ir->status = errors::Internal(
"UpdateInstanceCache: CompleteInstanceResponse for instance ",
cp->instance.instance_key, " gives source_rank=", source_rank,
" but cache already holds value=", ir->source_rank);
return ir->status;
}
ir->source_rank = source_rank;
}
if (ir->known_count < cp->group.group_size) {
ir->known_count = cp->group.group_size;
const int ir_known_size = ir->known.size();
if (ir_known_size != cp->group.group_size) {
ir->status = errors::Internal(
"UpdateInstanceCache:: CompleteInstanceResponse for instance ",
cp->instance.instance_key, " has known.size()=", ir->known.size(),
" < group_size=", cp->group.group_size);
return ir->status;
}
for (int i = 0; i < ir_known_size; ++i) {
ir->known[i] = true;
}
}
return ir->status;
}
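// Completes instance params locally when this task is the leader or the
// instance is already cached; otherwise asks the leader via RPC first.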
void CollectiveParamResolverDistributed::CompleteInstanceDistributed(
const string& device, CollectiveParams* cp, CancellationManager* cancel_mgr,
const StatusCallback& done) {
if (group_leader_.empty()) {
return CompleteInstanceLocal(device, cp, done);
} else if (InstanceIsCached(cp->group.group_key, cp->instance)) {
return CompleteInstanceLocal(device, cp, done);
} else {
CompleteInstanceCall* call = new CompleteInstanceCall(
cp->group, cp->instance, cp->name, device, cp->is_source, cancel_mgr,
group_leader_, worker_cache_);
CancellationToken abortion_token =
abortion_cancel_mgr_.get_cancellation_token();
bool already_aborted = !abortion_cancel_mgr_.RegisterCallback(
abortion_token, [call] { call->Cancel(); });
if (already_aborted) {
done(errors::Cancelled("collective ops already aborted"));
delete call;
return;
}
call->Start([this, device, cp, call, abortion_token, done](Status s) {
abortion_cancel_mgr_.DeregisterCallback(abortion_token);
if (s.ok()) {
s = UpdateInstanceCache(cp, call->resp_);
}
if (s.ok()) {
CompleteInstanceLocal(device, cp, done);
} else {
done(s);
}
delete call;
});
return;
}
}
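// Aborts all pending resolutions, including in-flight RPCs to the leader.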
void CollectiveParamResolverDistributed::StartAbort(const Status& s) {
{
mutex_lock l(status_mu_);
if (!status_.ok()) {
VLOG(2) << "CollectiveParamResolverDistributed already aborted. Ignoring "
"subsequent abortion with status: "
<< s;
return;
}
status_ = s;
}
StartAbortLocal(s);
abortion_cancel_mgr_.StartCancel();
}
} | #include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/distributed_runtime/worker.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
static std::unique_ptr<Device> NewDevice(const string& type,
const string& name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
attr.mutable_locality()->set_numa_node(3);
attr.set_incarnation(random::New64());
return std::make_unique<FakeDevice>(attr);
}
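// Worker cache that resolves device locality by calling GetStatus on the
// in-process fake workers instead of issuing real RPCs.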
class FakeCache : public TestWorkerCache {
public:
bool GetDeviceLocalityNonBlocking(const string& device,
DeviceLocality* locality) override {
return false;
}
void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
StatusCallback done) override {
string task_name;
string dev_part;
if (!DeviceNameUtils::SplitDeviceName(device, &task_name, &dev_part)) {
done(errors::Internal("failed to parse device name"));
return;
}
auto it = workers_.find(task_name);
if (it == workers_.end()) {
done(errors::Internal("failed to find worker ", task_name));
return;
}
WorkerInterface* wi = it->second;
GetStatusRequest req;
GetStatusResponse resp;
Status status = wi->GetStatus(&req, &resp);
if (!status.ok()) {
done(status);
return;
}
for (const auto& it : resp.device_attributes()) {
if (it.name() == device) {
*locality = it.locality();
done(absl::OkStatus());
return;
}
}
done(errors::Internal("device not found: ", device));
}
};
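// NCCL communicator stub: returns a fixed communicator key and completes
// every enqueue immediately.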
class FakeNcclCommunicator : public NcclCommunicatorInterface {
public:
string GenerateCommunicatorKey() override { return "mock-communicator-key"; }
void Enqueue(std::shared_ptr<CollectiveContext> col_ctx,
StatusCallback done) override {
done(absl::OkStatus());
}
void StartAbort(const Status& s) override {}
};
class DeviceResDistTest : public ::testing::Test {
public:
~DeviceResDistTest() override {
for (auto& name_param : cp_) {
name_param.second->Unref();
}
}
protected:
void DefineWorkers(int num_workers, int num_devices,
const string& device_type, bool nccl) {
for (int w = 0; w < num_workers; ++w) {
string name = strings::StrCat("/job:worker/replica:0/task:", w);
DefineWorker(name, device_type, num_devices, nccl);
}
}
void DefineWorker(const string& worker_name, const string& device_type,
int num_devices, bool nccl) {
ConfigProto config;
config.mutable_experimental()->set_collective_group_leader(
"/job:worker/replica:0/task:0");
config.mutable_experimental()->set_collective_nccl(nccl);
std::vector<std::unique_ptr<Device>> devices;
for (int i = 0; i < num_devices; ++i) {
devices.push_back(NewDevice(
device_type,
strings::StrCat(worker_name, "/device:", device_type, ":", i)));
}
device_mgrs_[worker_name] =
std::make_unique<StaticDeviceMgr>(std::move(devices));
std::vector<string>* dv = &dev_by_task_[worker_name];
dv->clear();
for (auto* d : device_mgrs_[worker_name]->ListDevices()) {
dv->push_back(d->name());
}
dev_resolvers_[worker_name] = std::make_unique<DeviceResolverDistributed>(
device_mgrs_[worker_name].get());
cp_resolvers_[worker_name] =
std::make_unique<CollectiveParamResolverDistributed>(
config, device_mgrs_[worker_name].get(),
dev_resolvers_[worker_name].get(), &nccl_communicator_, &wc_,
worker_name);
auto worker_env = std::make_unique<WorkerEnv>();
worker_env->env = Env::Default();
worker_env->device_mgr = device_mgrs_[worker_name].get();
worker_env->collective_executor_mgr =
std::make_unique<TestCollectiveExecutorMgr>(
cp_resolvers_[worker_name].get(), nullptr);
workers_[worker_name] = std::make_unique<Worker>(worker_env.get());
worker_envs_[worker_name] = std::move(worker_env);
wc_.AddWorker(worker_name, workers_[worker_name].get());
}
void DefineCollectiveParams(int num_workers, int num_devices,
const string& device_type,
CollectiveType coll_type = REDUCTION_COLLECTIVE,
int source_rank = 0) {
for (int wi = 0; wi < num_workers; ++wi) {
string task_name = strings::StrCat("/job:worker/replica:0/task:", wi);
for (int di = 0; di < num_devices; ++di) {
int idx = wi * num_devices + di;
string device_name =
strings::StrCat(task_name, "/device:", device_type, ":", di);
cp_[device_name] =
CreateCollectiveParams(num_workers, num_devices, device_type,
coll_type, idx == source_rank);
}
}
}
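// Builds CollectiveParams with fixed group/instance keys for a group that
// spans num_workers * num_devices devices.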
CollectiveParams* CreateCollectiveParams(int num_workers, int num_devices,
const string& device_type,
CollectiveType coll_type,
bool is_source) {
const int kGroupKey = 5;
const int kInstanceKey = 3;
auto* cp = new CollectiveParams();
cp->is_source = is_source;
cp->group.group_key = kGroupKey;
cp->group.group_size = num_workers * num_devices;
cp->group.device_type = DeviceType(device_type);
cp->group.num_tasks = num_workers;
cp->instance.instance_key = kInstanceKey;
cp->instance.type = coll_type;
cp->instance.data_type = DT_FLOAT;
cp->instance.shape = TensorShape({64});
cp->instance.impl_details.subdiv_offsets.push_back(0);
return cp;
}
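// Issues CompleteParamsAsync for every device in the group; completions are
// counted under mu_ and done_ is signalled once all of them have finished.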
void IssueRequests(int num_workers, int num_devices) {
{
mutex_lock l(mu_);
num_done_ = 0;
}
int group_size = num_workers * num_devices;
for (int wi = 0; wi < num_workers; ++wi) {
string task_name = strings::StrCat("/job:worker/replica:0/task:", wi);
for (int di = 0; di < num_devices; ++di) {
string device_name = strings::StrCat(task_name, "/device:CPU:", di);
IssueRequest(task_name, device_name, group_size);
}
}
}
void IssueRequest(const string& task_name, const string& device_name,
int group_size) {
Device* device = nullptr;
TF_CHECK_OK(device_mgrs_[task_name]->LookupDevice(device_name, &device));
CollectiveParams* cp = cp_[device_name];
CollectiveParamResolverDistributed* cp_res = cp_resolvers_[task_name].get();
CHECK(cp_res);
cp_res->CompleteParamsAsync(
device->attributes(), cp, &cm_,
[this, device_name, group_size](const Status& s) {
status_[device_name] = s;
{
mutex_lock l(mu_);
++num_done_;
if (num_done_ == group_size) {
done_.notify_all();
}
}
});
}
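// Blocks until all requests complete, then checks rank assignment, group
// membership, and that all devices agree on the leader's communicator key.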
void ValidateCollectiveParams(int num_workers, int num_devices) {
int device_count = num_workers * num_devices;
{
mutex_lock l(mu_);
if (num_done_ < device_count) {
done_.wait(l);
}
}
const int dev_count = num_workers * num_devices;
string dev0 = "/job:worker/replica:0/task:0/device:CPU:0";
for (int wi = 0; wi < num_workers; ++wi) {
string task_name = strings::StrCat("/job:worker/replica:0/task:", wi);
for (int di = 0; di < num_devices; ++di) {
string device_name = strings::StrCat(task_name, "/device:CPU:", di);
int idx = wi * num_devices + di;
TF_ASSERT_OK(status_[device_name]);
EXPECT_EQ(cp_[device_name]->default_rank, idx);
EXPECT_EQ(cp_[device_name]->group.members.size(), dev_count);
EXPECT_EQ(cp_[device_name]->group.members[idx].device.name(),
device_name);
EXPECT_EQ(cp_[device_name]->group.members[idx].task, task_name);
ValidateDeviceResolver(*cp_[device_name], task_name);
if (idx > 0) {
EXPECT_EQ(cp_[dev0]->group.runtime_details.communicator_key,
cp_[device_name]->group.runtime_details.communicator_key);
for (int i = 0; i < dev_count; ++i) {
EXPECT_EQ(cp_[dev0]->group.members[i].device.name(),
cp_[device_name]->group.members[i].device.name());
EXPECT_EQ(cp_[dev0]->group.members[i].task,
cp_[device_name]->group.members[i].task);
}
}
}
}
}
void ValidateDeviceResolver(const CollectiveParams& cp, const string& task) {
for (const CollGroupMember& member : cp.group.members) {
DeviceAttributes attributes;
TF_ASSERT_OK(dev_resolvers_[task]->GetDeviceAttributes(
member.device.name(), &attributes));
}
}
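// Re-creates a worker (its devices get fresh incarnation numbers) together
// with new collective params, and clears any stale per-device statuses.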
void RestartWorker(int worker_idx, int num_workers, int num_devices,
const string& device_type, bool nccl,
CollectiveType coll_type = REDUCTION_COLLECTIVE,
bool is_source = false) {
string worker_name =
strings::StrCat("/job:worker/replica:0/task:", worker_idx);
DefineWorker(worker_name, device_type, num_devices, nccl);
for (int i = 0; i < num_devices; ++i) {
string device_name =
strings::StrCat(worker_name, "/device:", device_type, ":", i);
if (cp_.find(device_name) != cp_.end()) {
cp_[device_name]->Unref();
}
cp_[device_name] = CreateCollectiveParams(
num_workers, num_devices, device_type, coll_type, is_source);
status_.erase(device_name);
}
}
FakeCache wc_;
FakeNcclCommunicator nccl_communicator_;
CancellationManager cm_;
absl::flat_hash_map<string, std::unique_ptr<DeviceMgr>> device_mgrs_;
absl::flat_hash_map<string, std::unique_ptr<DeviceResolverDistributed>>
dev_resolvers_;
absl::flat_hash_map<string,
std::unique_ptr<CollectiveParamResolverDistributed>>
cp_resolvers_;
absl::flat_hash_map<string, std::vector<string>> dev_by_task_;
absl::flat_hash_map<string, std::unique_ptr<WorkerEnv>> worker_envs_;
absl::flat_hash_map<string, std::unique_ptr<Worker>> workers_;
absl::flat_hash_map<string, CollectiveParams*> cp_;
absl::flat_hash_map<string, Status> status_;
mutex mu_;
int num_done_ TF_GUARDED_BY(mu_);
condition_variable done_;
};
TEST_F(DeviceResDistTest, Workers1Devices1) {
const int num_workers = 1;
const int num_devices = 1;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
TEST_F(DeviceResDistTest, Workers2Devices2) {
const int num_workers = 2;
const int num_devices = 2;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
TEST_F(DeviceResDistTest, DifferentIncarnation) {
const int num_workers = 2;
const int num_devices = 1;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
RestartWorker(1, num_workers, num_devices, "CPU", false);
const string task_name = "/job:worker/replica:0/task:1";
const string device_name = absl::StrCat(task_name, "/device:CPU:0");
IssueRequest(task_name, device_name, num_workers * num_devices);
EXPECT_TRUE(errors::IsFailedPrecondition(status_[device_name]));
}
TEST_F(DeviceResDistTest, BroadcastSourceRank0) {
const int num_workers = 2;
const int num_devices = 2;
const int source_rank = 0;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU", BROADCAST_COLLECTIVE,
source_rank);
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
TEST_F(DeviceResDistTest, BroadcastSourceRank3) {
const int num_workers = 2;
const int num_devices = 2;
const int source_rank = 3;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU", BROADCAST_COLLECTIVE,
source_rank);
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
TEST_F(DeviceResDistTest, Workers4Devices3) {
const int num_workers = 4;
const int num_devices = 3;
DefineWorkers(num_workers, num_devices, "CPU", true);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
}
} |
1,297 | cpp | tensorflow/tensorflow | eager_service_impl | tensorflow/core/distributed_runtime/eager/eager_service_impl.cc | tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_EAGER_SERVICE_IMPL_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_EAGER_SERVICE_IMPL_H_
#include <memory>
#include <unordered_map>
#include <utility>
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
namespace tensorflow {
namespace eager {
class EagerServiceImpl {
public:
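// Starts a background thread that wakes up every second and garbage-collects
// contexts whose keep-alive deadline has expired.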
explicit EagerServiceImpl(WorkerEnv* env) : env_(env) {
gc_thread_.reset(
env_->env->StartThread({}, "EagerServiceContextGC", [this]() {
while (true) {
{
mutex_lock l(gc_thread_shutdown_mu_);
gc_thread_cv_.wait_for(l, std::chrono::seconds(1));
if (shutting_down_) {
return;
}
}
{
mutex_lock l(contexts_mu_);
for (auto it = contexts_.begin(); it != contexts_.end();) {
if (it->second->IsStale()) {
it->second->Unref();
it = contexts_.erase(it);
} else {
it++;
}
}
}
}
}));
}
virtual ~EagerServiceImpl() {
{
mutex_lock l(gc_thread_shutdown_mu_);
shutting_down_ = true;
gc_thread_cv_.notify_all();
}
gc_thread_.reset();
mutex_lock l(contexts_mu_);
for (auto& entry : contexts_) {
entry.second->Unref();
}
}
Status CreateContext(const CreateContextRequest* request,
CreateContextResponse* response);
Status UpdateContext(const UpdateContextRequest* request,
UpdateContextResponse* response);
Status CreateMasterContext(const tensorflow::uint64 context_id,
EagerContext* context);
static constexpr uint64 kInvalidStreamId = 0;
Status Enqueue(CallOptions* call_opts, const EnqueueRequest* request,
EnqueueResponse* response,
uint64 stream_id = kInvalidStreamId);
Status WaitQueueDone(const WaitQueueDoneRequest* request,
WaitQueueDoneResponse* response);
void RunComponentFunction(CallOptions* call_opts,
const RunComponentFunctionRequest* request,
RunComponentFunctionResponse* response,
StatusCallback done);
Status KeepAlive(const KeepAliveRequest* request,
KeepAliveResponse* response);
Status CloseContext(const CloseContextRequest* request,
CloseContextResponse* response);
protected:
class ServerContext : public core::RefCounted {
public:
static ServerContext* CreateMasterContext(tensorflow::EagerContext* ctx,
const WorkerEnv* env) {
      return new ServerContext(ctx, /*destroy_after_secs=*/-1, env,
                               /*is_master=*/true);
}
explicit ServerContext(tensorflow::EagerContext* ctx,
int64_t destroy_after_secs, const WorkerEnv* env,
const bool is_master = false)
: ctx_(ctx), env_(env), is_master_(is_master) {
ctx->Ref();
destroy_after_micros_ =
destroy_after_secs * tensorflow::EnvTime::kSecondsToMicros;
RecordAccess();
}
~ServerContext() override {
if (!is_master_) {
ctx_->WaitForAndCloseRemoteContexts();
}
ctx_->Unref();
}
tensorflow::EagerContext* Context() const { return ctx_; }
void RecordAccess() {
mutex_lock l(last_accessed_mu_);
last_accessed_micros_ = env_->env->NowMicros();
}
bool IsStale() {
mutex_lock l(last_accessed_mu_);
const int64_t time_passed =
env_->env->NowMicros() - last_accessed_micros_;
return (destroy_after_micros_ > 0 && time_passed > destroy_after_micros_);
}
private:
tensorflow::EagerContext* ctx_;
const WorkerEnv* const env_;
mutex last_accessed_mu_;
int64_t last_accessed_micros_ TF_GUARDED_BY(last_accessed_mu_);
int64_t destroy_after_micros_;
const bool is_master_;
};
tensorflow::Status GetServerContext(uint64, ServerContext**);
class ClientTensorHandleDeleteNode : public EagerNode {
public:
ClientTensorHandleDeleteNode(
ServerContext* context,
std::unique_ptr<RemoteTensorHandleInternal> handle_to_delete)
: tensorflow::EagerNode(),
context_(context),
handle_to_delete_(std::move(handle_to_delete)) {
context_->Ref();
}
~ClientTensorHandleDeleteNode() override { context_->Unref(); }
Status Run() override {
VLOG(3) << "ServerContext: Deleting tensor handle "
<< handle_to_delete_->op_id << ":"
<< handle_to_delete_->output_num;
return context_->Context()->RemoteMgr()->DeleteTensorHandle(
*handle_to_delete_);
}
void Abort(Status status) override {}
bool Fatal() const override { return false; }
string DebugString() const override {
string out = "[ClientTensorHandleDeleteNode]";
strings::StrAppend(&out, " op_id: ", handle_to_delete_->op_id);
strings::StrAppend(&out, ", output_num: ", handle_to_delete_->output_num);
return out;
}
private:
ServerContext* const context_;
const std::unique_ptr<RemoteTensorHandleInternal> handle_to_delete_;
};
private:
Status ExecuteOp(CallOptions* call_opts, const Operation& operation,
EagerContext* eager_context, EagerExecutor* eager_executor,
QueueResponse* queue_response);
Status SendTensor(const SendTensorOp& send_tensor,
EagerContext* eager_context);
Status SendPackedHandle(const SendPackedHandleOp& send_packed_handle,
EagerContext* eager_context);
Status RegisterFunction(const RegisterFunctionOp& register_function,
EagerContext* eager_context);
Status RemoveFunction(const RemoveFunctionOp& remove_function,
EagerContext* eager_context);
Status CleanupFunction(const CleanupFunctionOp& cleanup_function);
WorkerEnv* const env_;
mutex contexts_mu_;
std::unordered_map<uint64, ServerContext*> contexts_
TF_GUARDED_BY(contexts_mu_);
std::unique_ptr<Thread> gc_thread_;
mutex gc_thread_shutdown_mu_;
condition_variable gc_thread_cv_;
bool shutting_down_ TF_GUARDED_BY(gc_thread_shutdown_mu_) = false;
EagerServiceImpl(const EagerServiceImpl&) = delete;
void operator=(const EagerServiceImpl&) = delete;
};
}
}
#endif
#include "tensorflow/core/distributed_runtime/eager/eager_service_impl.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/container/fixed_array.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "tensorflow/c/eager/immediate_execution_distributed_manager.h"
#include "xla/tsl/distributed_runtime/preemption/preemption_notifier.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/context_distributed_manager.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/common_runtime/eager/execute.h"
#include "tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.h"
#include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle.h"
#include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include "tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h"
#include "tensorflow/core/distributed_runtime/session_mgr.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/nccl/collective_communicator.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tsl/protobuf/coordination_config.pb.h"
namespace tensorflow {
namespace eager {
namespace {
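// Computes an op's output count from its OpDef, expanding number_attr and
// type_list_attr outputs using the supplied attribute map.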
Status GetNumRetvals(FunctionLibraryDefinition* func_lib_def,
const string& op_name,
const google::protobuf::Map<string, tensorflow::AttrValue>& attrs,
int* num_retvals) {
const tensorflow::OpRegistrationData* op_reg_data = nullptr;
auto status = tensorflow::OpRegistry::Global()->LookUp(op_name, &op_reg_data);
if (absl::IsNotFound(status)) {
status = func_lib_def->LookUp(op_name, &op_reg_data);
}
TF_RETURN_IF_ERROR(status);
const tensorflow::OpDef& op_def = op_reg_data->op_def;
for (const auto& output_arg : op_def.output_arg()) {
if (!output_arg.number_attr().empty()) {
auto iter = attrs.find(output_arg.number_attr());
if (iter == attrs.end()) {
return errors::InvalidArgument("Unable to find number_attr ",
output_arg.number_attr(),
" for Op: ", op_name);
}
*num_retvals += iter->second.i();
} else if (!output_arg.type_list_attr().empty()) {
auto iter = attrs.find(output_arg.type_list_attr());
if (iter == attrs.end()) {
return errors::InvalidArgument("Unable to find type_list_attr ",
output_arg.type_list_attr(),
" for Op: ", op_name);
}
*num_retvals += iter->second.list().type_size();
} else {
*num_retvals += 1;
}
}
return absl::OkStatus();
}
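// Translates a wire-format Operation into an EagerOperation: resolves the
// function library (component functions carry their own), deserializes remote
// and inline tensor inputs, applies attrs, and reports the output count.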
Status GetEagerOperationAndNumRetvals(const Operation& operation,
EagerContext* eager_context,
EagerExecutor* eager_executor,
EagerOperation* eager_op,
int* num_retvals) {
const char* name = operation.name().c_str();
std::optional<tensorflow::EagerFunctionParams> remote_func_params =
std::nullopt;
FunctionLibraryDefinition* func_lib_def;
if (operation.is_function()) {
if (operation.is_component_function()) {
func_lib_def =
eager_context->GetComponentFunctionFunctionLibraryDefinition(
operation.name());
if (func_lib_def == nullptr) {
return absl::InternalError(
absl::StrCat("Could not find function library for registered "
"component function: ",
operation.name()));
}
      remote_func_params = {operation.id(), /*is_component_function=*/true,
                            operation.func_step_id(), func_lib_def};
    } else {
      func_lib_def = eager_context->FuncLibDef();
      remote_func_params = {operation.id(), /*is_component_function=*/false,
                            /*step_id=*/std::nullopt,
                            /*func_lib_def=*/nullptr};
}
} else {
func_lib_def = eager_context->FuncLibDef();
}
TF_RETURN_IF_ERROR(eager_op->Reset(name, operation.device().c_str(), false,
eager_executor, remote_func_params));
{
tsl::profiler::TraceMe activity("EagerService:RemoteTensorHandleInternal",
tsl::profiler::TraceMeLevel::kVerbose);
for (const auto& input : operation.op_inputs()) {
tensorflow::TensorHandle* handle;
if (input.has_remote_handle()) {
TF_RETURN_IF_ERROR(
eager_context->RemoteMgr()->DeserializeRemoteTensorHandle(
input.remote_handle(), &handle));
TF_RETURN_IF_ERROR(eager_op->AddInput(handle));
} else {
Tensor tensor;
if (!ParseTensorProtoToTensor(input.tensor(), &tensor)) {
return errors::InvalidArgument("Invalid TensorProto: ",
input.tensor().DebugString());
} else {
handle = TensorHandle::CreateLocalHandle(std::move(tensor), nullptr,
nullptr, eager_context);
TF_RETURN_IF_ERROR(eager_op->AddInput(handle));
}
}
handle->Unref();
}
}
for (const auto& attr : operation.attrs()) {
eager_op->MutableAttrs()->Set(attr.first, attr.second);
}
return GetNumRetvals(func_lib_def, operation.name(), operation.attrs(),
num_retvals);
}
Status TensorHandleProto(TensorHandle* handle, TensorProto* proto) {
const tensorflow::Tensor* t = nullptr;
TF_RETURN_IF_ERROR(handle->Tensor(&t));
t->AsProtoTensorContent(proto);
return absl::OkStatus();
}
Status TensorHandleShape(TensorHandle* handle, TensorShapeProto* proto) {
const tensorflow::Tensor* t = nullptr;
if (handle->Type() == TensorHandle::LOCAL) {
TF_RETURN_IF_ERROR(handle->Tensor(&t));
t->shape().AsProto(proto);
} else {
TensorShape shape;
TF_RETURN_IF_ERROR(handle->Shape(&shape));
shape.AsProto(proto);
}
return absl::OkStatus();
}
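// Serializes op outputs into the response. With a valid op_id only shapes
// (and optionally device names) are sent, and non-remote handles are kept in
// the RemoteMgr for later lookup; otherwise full tensors are serialized.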
Status AddOpRetvalsToResponse(
EagerContext* eager_context, int op_id, int num_retvals,
const std::vector<int32>& output_nums, TensorHandle** retvals,
std::function<TensorProto*()> add_tensor_proto_fn,
std::function<TensorShapeProto*()> add_shape_proto_fn,
std::function<string*()> add_device_fn = nullptr) {
StatusGroup sg;
if (op_id == kInvalidOpId) {
for (int i = 0; i < num_retvals; i++) {
sg.Update(TensorHandleProto(retvals[i], add_tensor_proto_fn()));
retvals[i]->Unref();
}
} else {
for (int i = 0; i < num_retvals; i++) {
sg.Update(TensorHandleShape(retvals[i], add_shape_proto_fn()));
if (add_device_fn) {
Device* device = retvals[i]->device();
*add_device_fn() = device ? device->name() : "";
}
if (retvals[i]->Type() == TensorHandle::REMOTE) {
retvals[i]->Unref();
} else {
const int output_num = output_nums.empty() ? i : output_nums.at(i);
eager_context->RemoteMgr()->AddOperationOutput(retvals[i], op_id,
output_num);
}
}
}
return sg.as_summary_status();
}
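// Resets the coordination service agent if it is in an error state and
// (re)connects it if it is not yet connected.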
Status ResetAgentAndConnectToCoordinationService(
tsl::CoordinationServiceAgent* coord_agent) {
if (coord_agent->IsError()) {
const Status s = coord_agent->Reset();
if (!s.ok()) {
LOG(ERROR) << "Coordination Service agent reset failed " << s;
return s;
}
}
if (!coord_agent->IsConnected()) {
const Status s = coord_agent->Connect();
if (!s.ok()) {
LOG(ERROR) << "Coordination Service agent connect failed " << s;
return s;
}
}
return absl::OkStatus();
}
}
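// Creates the server-side eager context for a remote client: sets up the
// worker session, rendezvous, and cluster FLR, optionally wires up the
// coordination service, and registers the context under the requested id.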
Status EagerServiceImpl::CreateContext(const CreateContextRequest* request,
CreateContextResponse* response) {
bool update_collective_executor_mgr = false;
{
mutex_lock l(contexts_mu_);
if (contexts_.empty()) {
update_collective_executor_mgr = true;
} else {
auto context_it = contexts_.find(request->context_id());
if (context_it != contexts_.end()) {
if (request->context_view_id() <
context_it->second->Context()->GetContextViewId()) {
return errors::InvalidArgument("EagerService:CreateContext failed. ",
"Context id: <", request->context_id(),
"> already exists.");
} else {
context_it->second->Unref();
contexts_.erase(context_it);
}
}
}
}
if (env_ == nullptr || env_->rendezvous_mgr == nullptr) {
return tensorflow::errors::Internal(
"invalid eager env_ or env_->rendezvous_mgr.");
}
if (request->clear_existing_contexts()) {
for (auto* device : env_->device_mgr->ListDevices()) {
device->ClearResourceMgr();
}
env_->rendezvous_mgr->CleanupAll();
env_->collective_executor_mgr->CleanupAll();
TF_RETURN_IF_ERROR(env_->session_mgr->DeleteAllSessions());
std::unordered_map<uint64, ServerContext*> tmp_contexts;
{
mutex_lock l(contexts_mu_);
if (!contexts_.empty()) {
std::swap(tmp_contexts, contexts_);
}
}
for (auto& context : tmp_contexts) {
context.second->Unref();
}
}
tsl::core::RefCountPtr<RemoteRendezvous> r =
env_->rendezvous_mgr->Find(request->context_id());
auto session_name =
tensorflow::strings::StrCat("eager_", request->context_id());
if (VLOG_IS_ON(2)) {
VLOG(2) << "Creating context on /job:" << request->server_def().job_name()
<< "/task:" << request->server_def().task_index();
for (const auto& da : request->cluster_device_attributes()) {
VLOG(2) << " " << da.name();
}
}
TF_RETURN_IF_ERROR(env_->session_mgr->CreateSession(
session_name, request->server_def(), request->cluster_device_attributes(),
request->server_def().default_session_config().isolate_session_state()));
int64_t context_id = request->context_id();
std::function<void()> session_destroyer = [this, context_id, session_name]() {
env_->rendezvous_mgr->Cleanup(context_id);
auto s = env_->session_mgr->DeleteSession(session_name);
if (!s.ok()) {
LOG(WARNING) << "Failed to destroy worker session '" << session_name
<< "' due to " << s.message();
}
};
std::shared_ptr<WorkerSession> worker_session;
TF_RETURN_IF_ERROR(env_->session_mgr->WorkerSessionForSession(
session_name, &worker_session));
tensorflow::DeviceMgr* device_mgr = worker_session->device_mgr();
TF_RETURN_IF_ERROR(r->Initialize(worker_session.get()));
r->SetRemoteEagerContextDefault();
std::function<tsl::core::RefCountPtr<Rendezvous>(const int64_t)>
rendezvous_creator = [worker_session, this](const int64_t step_id) {
tsl::core::RefCountPtr<RemoteRendezvous> r =
env_->rendezvous_mgr->Find(step_id);
r->Initialize(worker_session.get()).IgnoreError();
return r;
};
LOG(INFO) << "Creating " << (request->async() ? "async" : "sync")
<< " eager service context with rendezvous_id on host "
<< port::Hostname() << " " << worker_session->worker_name();
SessionOptions opts;
opts.config = request->server_def().default_session_config();
LOG(INFO) << "SessionOptions: " << opts.config.DebugString();
if (update_collective_executor_mgr) {
env_->collective_executor_mgr = CreateProdRpcCollectiveExecutorMgr(
opts.config, device_mgr, MaybeCreateNcclCommunicator(opts.config),
worker_session->worker_cache(), worker_session->worker_name());
}
tensorflow::EagerContext* ctx = new tensorflow::EagerContext(
opts, tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
request->async(), device_mgr, false, std::move(r),
worker_session->cluster_flr(), env_->collective_executor_mgr.get());
core::ScopedUnref unref_ctx(ctx);
std::vector<string> remote_workers;
worker_session->worker_cache()->ListWorkers(&remote_workers);
remote_workers.erase(std::remove(remote_workers.begin(), remote_workers.end(),
worker_session->worker_name()),
remote_workers.end());
std::unique_ptr<tensorflow::eager::EagerClientCache> remote_eager_workers;
TF_RETURN_IF_ERROR(worker_session->worker_cache()->GetEagerClientCache(
&remote_eager_workers));
DistributedFunctionLibraryRuntime* cluster_flr =
eager::CreateClusterFLR(request->context_id(), ctx, worker_session.get());
  auto remote_mgr =
      std::make_unique<tensorflow::eager::RemoteMgr>(/*is_master=*/false, ctx);
Status s = ctx->InitializeRemoteWorker(
std::move(remote_eager_workers), worker_session->remote_device_mgr(),
remote_workers, request->context_id(), request->context_view_id(),
std::move(rendezvous_creator), cluster_flr, std::move(remote_mgr),
std::move(session_destroyer));
if (!s.ok()) {
VLOG(1) << "EagerContext::InitializeRemoteWorker failed with "
<< s.ToString();
return s;
}
#if !defined(IS_MOBILE_PLATFORM)
const auto& config = request->server_def().default_session_config();
const bool enable_coordination =
!config.experimental().coordination_config().service_type().empty();
if (enable_coordination) {
auto dist_mgr = std::make_unique<EagerContextDistributedManager>(ctx);
auto coord_agent = env_->session_mgr->GetCoordinationServiceAgent();
dist_mgr->SetCoordinationServiceAgent(coord_agent);
if (config.experimental().coordination_config().enable_health_check()) {
TF_RETURN_IF_ERROR(
ResetAgentAndConnectToCoordinationService(coord_agent));
}
auto preemption_notifier =
tsl::PreemptionNotifier::CreatePreemptionNotifier("sigterm",
Env::Default());
preemption_notifier->WillBePreemptedAtAsync(
[coord_agent](absl::StatusOr<absl::Time> time_or_status) {
if (time_or_status.ok()) {
const auto coord_task = coord_agent->GetOwnTask().value();
Status s = coord_agent->InsertKeyValue(
"TF_DEFAULT_PREEMPTION_NOTICE_KEY",
absl::StrCat("/job:", coord_task.job_name(),
"/task:", coord_task.task_id()));
if (!s.ok()) {
LOG(INFO) << "Preemption not exported to coordination service: "
<< s;
}
}
});
dist_mgr->SetPreemptionNotifier(std::move(preemption_notifier));
ctx->SetDistributedManager(std::move(dist_mgr));
}
#endif
std::vector<DeviceAttributes> device_attributes;
device_mgr->ListDeviceAttributes(&device_attributes);
for (const auto& da : device_attributes) {
*response->add_device_attributes() = da;
}
{
mutex_lock l(contexts_mu_);
auto context_it = contexts_.find(request->context_id());
if (context_it != contexts_.end()) {
return errors::InvalidArgument("EagerService:CreateContext failed. ",
"Context id: <", request->context_id(),
"> already exists.");
}
contexts_.emplace(request->context_id(),
new ServerContext(ctx, request->keep_alive_secs(), env_));
}
return absl::OkStatus();
}
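// Applies a cluster topology update to an existing context. The view id must
// advance by exactly one; an update without device attributes only bumps it.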
Status EagerServiceImpl::UpdateContext(const UpdateContextRequest* request,
UpdateContextResponse* response) {
if (env_ == nullptr || env_->rendezvous_mgr == nullptr) {
return tensorflow::errors::Internal(
"invalid eager env_ or env_->rendezvous_mgr.");
}
ServerContext* server_context = nullptr;
TF_RETURN_IF_ERROR(GetServerContext(request->context_id(), &server_context));
core::ScopedUnref context_unref(server_context);
tensorflow::EagerContext* ctx = server_context->Context();
if (request->context_view_id() != ctx->GetContextViewId() + 1) {
return errors::InvalidArgument(
"EagerService:UpdateContext failed. Context id: <",
request->context_id(), "> currently at view #", ctx->GetContextViewId(),
" but received update request at view #", request->context_view_id(),
". View id should only be continuously incremented.");
}
if (request->cluster_device_attributes_size() == 0) {
ctx->IncrementContextViewId();
VLOG(1) << "Processing simplified UpdateContextRequest on "
<< ctx->HostCPU()->name();
return absl::OkStatus();
}
auto session_name =
tensorflow::strings::StrCat("eager_", request->context_id());
TF_RETURN_IF_ERROR(
env_->session_mgr->UpdateSession(session_name, request->server_def(),
request->cluster_device_attributes()));
std::shared_ptr<WorkerSession> worker_session;
TF_RETURN_IF_ERROR(env_->session_mgr->WorkerSessionForSession(
session_name, &worker_session));
const tensorflow::DeviceMgr* device_mgr = worker_session->device_mgr();
std::vector<string> remote_workers;
worker_session->worker_cache()->ListWorkers(&remote_workers);
remote_workers.erase(std::remove(remote_workers.begin(), remote_workers.end(),
worker_session->worker_name()),
remote_workers.end());
VLOG(1) << "On existing server " << worker_session->worker_name()
<< " updating remote workers";
if (VLOG_IS_ON(2)) {
for (const string& rw : remote_workers) {
VLOG(2) << "Remote worker " << rw;
}
}
std::unique_ptr<tensorflow::eager::EagerClientCache> remote_eager_workers;
TF_RETURN_IF_ERROR(worker_session->worker_cache()->GetEagerClientCache(
&remote_eager_workers));
ctx->ClearCachesAndThreadExecutors();
Status s = ctx->UpdateRemoteWorker(std::move(remote_eager_workers),
remote_workers, request->context_id());
if (!s.ok()) {
VLOG(1) << "EagerContext::UpdateRemoteWorker failed with " << s.ToString();
return s;
}
#if !defined(IS_MOBILE_PLATFORM)
const auto& config = request->server_def().default_session_config();
const bool should_connect =
!config.experimental().coordination_config().service_type().empty() &&
config.experimental().coordination_config().enable_health_check();
if (should_connect) {
auto coord_agent = env_->session_mgr->GetCoordinationServiceAgent();
TF_RETURN_IF_ERROR(ResetAgentAndConnectToCoordinationService(coord_agent));
}
#endif
std::vector<DeviceAttributes> device_attributes;
device_mgr->ListDeviceAttributes(&device_attributes);
for (const auto& da : device_attributes) {
*response->add_device_attributes() = da;
}
return absl::OkStatus();
}
Status EagerServiceImpl::CreateMasterContext(
const tensorflow::uint64 context_id, EagerContext* context) {
{
mutex_lock l(contexts_mu_);
auto iter = contexts_.find(context_id);
if (iter != contexts_.end()) {
return errors::InvalidArgument(
"EagerService:CreateMasterContext failed. ", "Context id: <",
context_id, "> already exists.");
}
}
ServerContext* server_context =
ServerContext::CreateMasterContext(context, env_);
mutex_lock l(contexts_mu_);
contexts_.emplace(context_id, server_context);
return absl::OkStatus();
}
void EagerServiceImpl::RunComponentFunction(
CallOptions* call_opts, const RunComponentFunctionRequest* request,
RunComponentFunctionResponse* response, StatusCallback done) {
ServerContext* context = nullptr;
Status s = GetServerContext(request->context_id(), &context);
if (!s.ok()) {
done(s);
return;
}
core::ScopedUnref context_unref(context);
auto& operation = request->operation();
if (!operation.is_function() || !operation.is_component_function()) {
done(errors::Internal(
"RunComponentFunction request can only be used to execute "
"component functions."));
return;
}
EagerContext* eager_context = context->Context();
EagerExecutor* eager_executor = &eager_context->Executor | #include "tensorflow/core/distributed_runtime/eager/eager_service_impl.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <unordered_map>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/common_runtime/eager/kernel_and_device.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.h"
#include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/distributed_runtime/session_mgr.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/eager_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/remote_tensor_handle.pb.h"
#include "tensorflow/core/protobuf/tensorflow_server.pb.h"
namespace tensorflow {
namespace eager {
namespace {
class TestEagerServiceImpl : public EagerServiceImpl {
public:
explicit TestEagerServiceImpl(WorkerEnv* env) : EagerServiceImpl(env) {}
Status GetEagerContext(const uint64 context_id, EagerContext** ctx) {
ServerContext* context = nullptr;
TF_RETURN_IF_ERROR(GetServerContext(context_id, &context));
core::ScopedUnref context_unref(context);
*ctx = context->Context();
return absl::OkStatus();
}
Status GetTensorHandle(const uint64 context_id,
const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle) {
ServerContext* context = nullptr;
TF_RETURN_IF_ERROR(GetServerContext(context_id, &context));
core::ScopedUnref context_unref(context);
return context->Context()->RemoteMgr()->GetTensorHandle(remote_handle,
handle);
}
};
class FakeEagerClient : public EagerClient {
public:
FakeEagerClient() {}
~FakeEagerClient() override {}
void SetServiceImpl(TestEagerServiceImpl* impl) { impl_ = impl; }
#define CLIENT_METHOD(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done) \
override { \
done(impl_->method(request, response)); \
}
CLIENT_METHOD(CreateContext);
CLIENT_METHOD(UpdateContext);
CLIENT_METHOD(WaitQueueDone);
CLIENT_METHOD(KeepAlive);
CLIENT_METHOD(CloseContext);
#undef CLIENT_METHOD
#define CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done, \
int64_t init_timeout_in_ms, int retries) override { \
done(impl_->method(request, response)); \
}
CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES(CreateContext);
#undef CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES
void EnqueueAsync(CallOptions* call_opts, const EnqueueRequest* request,
EnqueueResponse* response, StatusCallback done) override {
done(impl_->Enqueue(call_opts, request, response));
}
void RunComponentFunctionAsync(CallOptions* call_opts,
const RunComponentFunctionRequest* request,
RunComponentFunctionResponse* response,
StatusCallback done) override {
impl_->RunComponentFunction(call_opts, request, response, std::move(done));
}
void StreamingEnqueueAsync(bool enable_streaming_enqueue,
CallOptions* call_opts,
const EnqueueRequest* request,
EnqueueResponse* response,
StatusCallback done) override {
done(impl_->Enqueue(nullptr, request, response));
}
bool allow_multiple_pending_requests() const override { return false; }
private:
TestEagerServiceImpl* impl_;
};
class DummyEagerClientCache : public EagerClientCache {
public:
DummyEagerClientCache() : client_(new FakeEagerClient) {}
Status GetClient(const string& target,
core::RefCountPtr<EagerClient>* client) override {
client->reset(client_.get());
client_->Ref();
return absl::OkStatus();
}
private:
core::RefCountPtr<EagerClient> client_;
};
class FakeCache : public TestWorkerCache {
Status GetEagerClientCache(
std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override {
*eager_client_cache = std::make_unique<DummyEagerClientCache>();
return absl::OkStatus();
}
void ListWorkers(std::vector<string>* workers) const override {
workers->push_back("/job:localhost/replica:0/task:0");
}
};
class EagerServiceImplTest : public ::testing::Test {
public:
EagerServiceImplTest()
: rendezvous_mgr_(&worker_env_),
session_mgr_(new SessionMgr(
&worker_env_, "/job:localhost/replica:0/task:0/device:CPU:0",
std::unique_ptr<WorkerCacheInterface>(new FakeCache),
[](const ServerDef& server_def,
WorkerCacheInterface** worker_cache) {
*worker_cache = new FakeCache;
return absl::OkStatus();
},
nullptr)) {
worker_env_.env = Env::Default();
worker_env_.rendezvous_mgr = &rendezvous_mgr_;
worker_env_.session_mgr = session_mgr_.get();
device_mgr_ = std::make_unique<StaticDeviceMgr>(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
worker_env_.device_mgr = device_mgr_.get();
}
protected:
WorkerEnv worker_env_;
tensorflow::RpcRendezvousMgr rendezvous_mgr_;
std::unique_ptr<SessionMgr> session_mgr_;
std::unique_ptr<DynamicDeviceMgr> device_mgr_;
};
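// Fills `tensor_proto` with the 2x2 float tensor [[1, 2], [3, 4]].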
void SetTensorProto(TensorProto* tensor_proto) {
int64_t dims[] = {2, 2};
float data[] = {1.0f, 2.0f, 3.0f, 4.0f};
TF_Tensor* t = TF_AllocateTensor(
TF_FLOAT, &dims[0], sizeof(dims) / sizeof(int64_t), sizeof(data));
memcpy(TF_TensorData(t), &data[0], TF_TensorByteSize(t));
tensorflow::Tensor tensor;
TF_ASSERT_OK(tensorflow::TF_TensorToTensor(t, &tensor));
tensor.AsProtoTensorContent(tensor_proto);
TF_DeleteTensor(t);
}
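// Populates an Operation proto, encoding each input either as an inline
// TensorProto or as a remote handle (op_id, output_num) on `device`.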
void BuildOperation(
Operation* operation, int64_t id, const string& name,
const std::vector<std::variant<TensorProto, std::pair<int64_t, int32>>>&
inputs,
const std::unordered_map<string, AttrValue>& attrs, const string& device) {
operation->set_id(id);
operation->set_name(name);
operation->set_device(device);
for (const auto& input : inputs) {
if (input.index() == 0) {
*operation->add_op_inputs()->mutable_tensor() =
std::get<TensorProto>(input);
} else {
const auto& tensor_handle_pair =
std::get<std::pair<int64_t, int32>>(input);
auto* input = operation->add_op_inputs()->mutable_remote_handle();
input->set_op_id(tensor_handle_pair.first);
input->set_output_num(tensor_handle_pair.second);
input->set_op_device(device);
input->set_device(device);
}
}
for (const auto& attr_entry : attrs) {
(*operation->mutable_attrs())[attr_entry.first] = attr_entry.second;
}
}
void AddOperationToEnqueueRequest(
int64_t id, const string& name,
const std::vector<std::variant<TensorProto, std::pair<int64_t, int32>>>&
inputs,
const std::unordered_map<string, AttrValue>& attrs, const string& device,
EnqueueRequest* request) {
auto* operation = request->add_queue()->mutable_operation();
BuildOperation(operation, id, name, inputs, attrs, device);
}
void AddOperationToRunComponentFunctionRequest(
int64_t id, const string& name,
const std::vector<std::variant<TensorProto, std::pair<int64_t, int32>>>&
inputs,
const std::unordered_map<string, AttrValue>& attrs, const string& device,
const int output_num, RunComponentFunctionRequest* request) {
auto* operation = request->mutable_operation();
operation->set_is_function(true);
operation->set_is_component_function(true);
request->add_output_num(output_num);
BuildOperation(operation, id, name, inputs, attrs, device);
}
tensorflow::NodeDef MatMulFunctionNodeDef() {
tensorflow::NodeDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" name: 'matmul_func'"
" op: 'MatMulFunction'"
" input: 'a'"
" input: 'a'"
" attr {"
" key: 'T'"
" value {"
" type: DT_FLOAT"
" }"
" }",
&def));
return def;
}
tensorflow::FunctionDef MatMulFunction() {
tensorflow::FunctionDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" signature {"
" name: 'MatMulFunction'"
" input_arg {"
" name: 'a'"
" type: DT_FLOAT"
" }"
" output_arg {"
" name: 'm'"
" type: DT_FLOAT"
" }"
" }"
" node_def {"
" name: 'matmul'"
" op: 'MatMul'"
" input: 'a'"
" input: 'a'"
" attr {"
" key: 'T'"
" value {"
" type: DT_FLOAT"
" }"
" }"
" attr {"
" key: 'transpose_a'"
" value {"
" b: false"
" }"
" }"
" }"
" ret {"
" key: 'm'"
" value: 'matmul:product'"
" }",
&def));
return def;
}
tensorflow::FunctionDef MatMulTransposeFunction() {
tensorflow::FunctionDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" signature {"
" name: 'MatMulFunction'"
" input_arg {"
" name: 'a'"
" type: DT_FLOAT"
" }"
" output_arg {"
" name: 'm'"
" type: DT_FLOAT"
" }"
" }"
" node_def {"
" name: 'matmul'"
" op: 'MatMul'"
" input: 'a'"
" input: 'a'"
" attr {"
" key: 'T'"
" value {"
" type: DT_FLOAT"
" }"
" }"
" attr {"
" key: 'transpose_a'"
" value {"
" b: true"
" }"
" }"
" }"
" ret {"
" key: 'm'"
" value: 'matmul:product'"
" }",
&def));
return def;
}
tensorflow::FunctionDef MatMulNestedFunction() {
tensorflow::FunctionDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" signature {"
" name: 'MatMulNestedFunction'"
" input_arg {"
" name: 'a'"
" type: DT_FLOAT"
" }"
" output_arg {"
" name: 'matmul_nested'"
" type: DT_FLOAT"
" }"
" }"
" node_def {"
" name: 'matmul_nested'"
" op: 'MatMulFunction'"
" input: 'a'"
" attr {"
" key: 'T'"
" value {"
" type: DT_FLOAT"
" }"
" }"
" }"
" ret {"
" key: 'matmul_nested'"
" value: 'matmul_nested:m:0'"
" }",
&def));
return def;
}
tensorflow::FunctionDef SingleRecvNodeFunction() {
tensorflow::FunctionDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" signature {"
" name: 'SingleRecvNodeFunction'"
" input_arg {"
" name: 'a'"
" type: DT_FLOAT"
" }"
" output_arg {"
" name: 'recv_tensor'"
" type: DT_FLOAT"
" }"
" }"
" node_def {"
" name: 'recv_node'"
" op: '_Recv'"
" device: '/job:localhost/replica:0/task:0/device:CPU:0'"
" attr {"
" key: 'client_terminated'"
" value {"
" b: true"
" }"
" }"
" attr {"
" key: 'recv_device'"
" value {"
" s: '/job:localhost/replica:0/task:0/device:CPU:0'"
" }"
" }"
" attr {"
" key: 'send_device'"
" value {"
" s: '/job:localhost/replica:0/task:0/device:CPU:0'"
" }"
" }"
" attr {"
" key: 'send_device_incarnation'"
" value {"
" i: 1"
" }"
" }"
" attr {"
" key: 'tensor_name'"
" value {"
" s: 't0'"
" }"
" }"
" attr {"
" key: 'tensor_type'"
" value {"
" type: DT_FLOAT"
" }"
" }"
" }"
" ret {"
" key: 'recv_tensor'"
" value: 'recv_node:tensor:0'"
" }",
&def));
return def;
}
TEST_F(EagerServiceImplTest, BasicTest) {
TestEagerServiceImpl eager_service_impl(&worker_env_);
uint64 context_id = random::New64();
CreateContextRequest request;
request.mutable_server_def()->set_job_name("localhost");
request.mutable_server_def()->set_task_index(0);
request.set_context_id(context_id);
CreateContextResponse response;
TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
EnqueueRequest remote_enqueue_request;
remote_enqueue_request.set_context_id(context_id);
EnqueueResponse remote_enqueue_response;
std::unordered_map<string, AttrValue> const_attrs;
AttrValue val;
val.set_type(tensorflow::DataType::DT_FLOAT);
const_attrs.insert({"dtype", val});
val.Clear();
SetTensorProto(val.mutable_tensor());
const_attrs.insert({"value", val});
AddOperationToEnqueueRequest(1, "Const", {}, const_attrs,
"/job:localhost/replica:0/task:0/device:CPU:0",
&remote_enqueue_request);
std::unordered_map<string, AttrValue> attrs;
val.Clear();
val.set_type(tensorflow::DataType::DT_FLOAT);
attrs.insert({"T", val});
val.Clear();
val.set_b(false);
attrs.insert({"transpose_a", val});
attrs.insert({"transpose_b", val});
AddOperationToEnqueueRequest(
2, "MatMul", {std::make_pair(1, 0), std::make_pair(1, 0)}, attrs,
"/job:localhost/replica:0/task:0/device:CPU:0", &remote_enqueue_request);
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request,
&remote_enqueue_response));
auto& matmul_result_shape =
remote_enqueue_response.queue_response(1).shape(0);
EXPECT_EQ(matmul_result_shape.dim(0).size(), 2);
EXPECT_EQ(matmul_result_shape.dim(1).size(), 2);
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
context_id, RemoteTensorHandleInternal(2, 0), &tensor_handle));
const tensorflow::Tensor* t = nullptr;
TF_ASSERT_OK(tensor_handle->Tensor(&t));
auto actual = t->flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(7, actual(0));
EXPECT_EQ(10, actual(1));
EXPECT_EQ(15, actual(2));
EXPECT_EQ(22, actual(3));
CloseContextRequest close_context_request;
close_context_request.set_context_id(context_id);
close_context_request.set_context_view_id(0);
CloseContextResponse close_context_response;
TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request,
&close_context_response));
}
class EagerServiceImplFunctionTest : public EagerServiceImplTest {
public:
EagerServiceImplFunctionTest() : EagerServiceImplTest() {}
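  // Registers `register_op`, feeds a 2x2 constant (inline when `local_inputs`
  // is set), runs `function_name`, and checks the MatMul result; when
  // `test_cancel` is set, the enqueue is cancelled and kCancelled is expected.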
void TestFunction(const RegisterFunctionOp& register_op,
const string& function_name,
const bool local_inputs = false,
const bool test_cancel = false) {
TestEagerServiceImpl eager_service_impl(&worker_env_);
uint64 context_id = random::New64();
CreateContextRequest request;
request.mutable_server_def()->set_job_name("localhost");
request.mutable_server_def()->set_task_index(0);
request.set_context_id(context_id);
CreateContextResponse response;
TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
EnqueueRequest enqueue_request;
enqueue_request.set_context_id(context_id);
*enqueue_request.add_queue()->mutable_register_function() = register_op;
EnqueueResponse enqueue_response;
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &enqueue_request,
&enqueue_response));
EnqueueRequest remote_enqueue_request;
remote_enqueue_request.set_context_id(context_id);
EnqueueResponse remote_enqueue_response;
if (local_inputs) {
TensorProto tensor_proto;
SetTensorProto(&tensor_proto);
AddOperationToEnqueueRequest(
2, function_name, {tensor_proto},
std::unordered_map<string, AttrValue>(),
"/job:localhost/replica:0/task:0/device:CPU:0",
&remote_enqueue_request);
} else {
std::unordered_map<string, AttrValue> const_attrs;
AttrValue val;
val.set_type(tensorflow::DataType::DT_FLOAT);
const_attrs.insert({"dtype", val});
val.Clear();
SetTensorProto(val.mutable_tensor());
const_attrs.insert({"value", val});
AddOperationToEnqueueRequest(
1, "Const", {}, const_attrs,
"/job:localhost/replica:0/task:0/device:CPU:0",
&remote_enqueue_request);
AddOperationToEnqueueRequest(
2, function_name, {std::make_pair(1, 0)},
std::unordered_map<string, AttrValue>(),
"/job:localhost/replica:0/task:0/device:CPU:0",
&remote_enqueue_request);
}
CallOptions call_opts;
Status status;
Notification n;
Env::Default()->SchedClosure([&] {
status = eager_service_impl.Enqueue(&call_opts, &remote_enqueue_request,
&remote_enqueue_response);
n.Notify();
});
if (test_cancel) {
Env::Default()->SleepForMicroseconds(500000);
call_opts.StartCancel();
n.WaitForNotification();
EXPECT_TRUE(absl::IsCancelled(status)) << status.message();
} else {
n.WaitForNotification();
TF_ASSERT_OK(status);
const tensorflow::Tensor* t = nullptr;
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
context_id, RemoteTensorHandleInternal(2, 0), &tensor_handle));
TF_ASSERT_OK(tensor_handle->Tensor(&t));
auto actual = t->flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(7, actual(0));
EXPECT_EQ(10, actual(1));
EXPECT_EQ(15, actual(2));
EXPECT_EQ(22, actual(3));
}
CloseContextRequest close_context_request;
close_context_request.set_context_id(context_id);
close_context_request.set_context_view_id(0);
CloseContextResponse close_context_response;
TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request,
&close_context_response));
}
void TestComponentFunction(const RegisterFunctionOp& register_op,
const string& function_name,
const bool test_cancel) {
TestEagerServiceImpl eager_service_impl(&worker_env_);
uint64 context_id = random::New64();
CreateContextRequest request;
request.mutable_server_def()->set_job_name("localhost");
request.mutable_server_def()->set_task_index(0);
request.set_context_id(context_id);
CreateContextResponse response;
TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
EnqueueRequest enqueue_request;
enqueue_request.set_context_id(context_id);
*enqueue_request.add_queue()->mutable_register_function() = register_op;
EnqueueResponse enqueue_response;
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &enqueue_request,
&enqueue_response));
EnqueueRequest remote_enqueue_request;
remote_enqueue_request.set_context_id(context_id);
EnqueueResponse remote_enqueue_response;
std::unordered_map<string, AttrValue> const_attrs;
AttrValue val;
val.set_type(tensorflow::DataType::DT_FLOAT);
const_attrs.insert({"dtype", val});
val.Clear();
SetTensorProto(val.mutable_tensor());
const_attrs.insert({"value", val});
AddOperationToEnqueueRequest(1, "Const", {}, const_attrs,
"/job:localhost/replica:0/task:0/device:CPU:0",
&remote_enqueue_request);
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request,
&remote_enqueue_response));
RunComponentFunctionRequest run_comp_func_request;
run_comp_func_request.set_context_id(context_id);
RunComponentFunctionResponse run_comp_func_response;
const int output_num = 5;
AddOperationToRunComponentFunctionRequest(
2, function_name, {std::make_pair(1, 0)},
std::unordered_map<string, AttrValue>(),
"/job:localhost/replica:0/task:0/device:CPU:0", output_num,
&run_comp_func_request);
CallOptions call_opts;
Notification n;
Status status;
eager_service_impl.RunComponentFunction(&call_opts, &run_comp_func_request,
&run_comp_func_response,
[&status, &n](const Status& s) {
status.Update(s);
n.Notify();
});
if (test_cancel) {
call_opts.StartCancel();
}
n.WaitForNotification();
if (test_cancel) {
EXPECT_TRUE(absl::IsCancelled(status)) << status.message();
} else {
TF_ASSERT_OK(status);
const tensorflow::Tensor* t = nullptr;
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
context_id, RemoteTensorHandleInternal(2, output_num),
&tensor_handle));
TF_ASSERT_OK(tensor_handle->Tensor(&t));
auto actual = t->flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(7, actual(0));
EXPECT_EQ(10, actual(1));
EXPECT_EQ(15, actual(2));
EXPECT_EQ(22, actual(3));
}
CloseContextRequest close_context_request;
close_context_request.set_context_id(context_id);
close_context_request.set_context_view_id(0);
CloseContextResponse close_context_response;
TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request,
&close_context_response));
}
};
TEST_F(EagerServiceImplFunctionTest, BasicFunctionTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = MatMulFunction();
TestFunction(register_op, "MatMulFunction");
}
TEST_F(EagerServiceImplFunctionTest, FunctionWithLocalInputsTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = MatMulFunction();
TestFunction(register_op, "MatMulFunction", true);
}
TEST_F(EagerServiceImplFunctionTest, NestedFunctionTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = MatMulNestedFunction();
*register_op.mutable_library()->add_function() = MatMulFunction();
TestFunction(register_op, "MatMulNestedFunction");
}
TEST_F(EagerServiceImplFunctionTest, FunctionCancellationTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = SingleRecvNodeFunction();
TestFunction(register_op, "SingleRecvNodeFunction", false,
true);
}
TEST_F(EagerServiceImplFunctionTest, ComponentFunctionTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = MatMulFunction();
register_op.set_is_component_function(true);
TestComponentFunction(register_op, "MatMulFunction", false);
}
TEST_F(EagerServiceImplFunctionTest, ComponentFunctionCancellationTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = SingleRecvNodeFunction();
register_op.set_is_component_function(true);
TestComponentFunction(register_op, "SingleRecvNodeFunction", true);
}
TEST_F(EagerServiceImplFunctionTest, ComponentNestedFunctionTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = MatMulNestedFunction();
*register_op.mutable_library()->add_function() = MatMulFunction();
register_op.set_is_component_function(true);
TestComponentFunction(register_op, "MatMulNestedFunction", false);
}
TEST_F(EagerServiceImplFunctionTest, ComponentNestedFunctionWithNameClashTest) {
TestEagerServiceImpl eager_service_impl(&worker_env_);
uint64 context_id = random::New64();
CreateContextRequest request;
request.mutable_server_def()->set_job_name("localhost");
request.mutable_server_def()->set_task_index(0);
request.set_context_id(context_id);
CreateContextResponse response;
TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
{
EnqueueRequest enqueue_request;
enqueue_request.set_context_id(context_id);
RegisterFunctionOp* register_op =
enqueue_request.add_queue()->mutable_register_function();
*register_op->mutable_function_def() = MatMulNestedFunction();
*register_op->mutable_library()->add_function() = MatMulFunction();
register_op->set_is_component_function(true);
EnqueueResponse enqueue_response;
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &enqueue_request,
&enqueue_response));
}
{
EnqueueRequest enqueue_request;
enqueue_request.set_context_id(context_id);
RegisterFunctionOp* register_op =
enqueue_request.add_queue()->mutable_register_function();
*register_op->mutable_function_def() = MatMulNestedFunction();
register_op->mutable_function_def()->mutable_signature()->set_name(
"MatMulNestedTransposeFunction");
*register_op->mutable_library()->add_function() = MatMulTransposeFunction();
register_op->set_is_component_function(true);
EnqueueResponse enqueue_response;
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &enqueue_request,
&enqueue_response));
}
EnqueueRequest remote_enqueue_request;
remote_enqueue_request.set_context_id(context_id);
EnqueueResponse remote_enqueue_response;
std::unordered_map<string, AttrValue> const_attrs;
AttrValue val;
val.set_type(tensorflow::DataType::DT_FLOAT);
const_attrs.insert({"dtype", val});
val.Clear();
SetTensorProto(val.mutable_tensor());
const_attrs.insert({"value", val});
AddOperationToEnqueueRequest(1, "Const", {}, const_attrs,
"/job:localhost/replica:0/task:0/device:CPU:0",
&remote_enqueue_request);
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request,
&remote_enqueue_response));
{
RunComponentFunctionRequest run_comp_func_request;
run_comp_func_request.set_context_id(context_id);
RunComponentFunctionResponse run_comp_func_response;
const int output_num = 5;
AddOperationToRunComponentFunctionRequest(
2, "MatMulNestedFunction", {std::make_pair(1, 0)},
std::unordered_map<string, AttrValue>(),
"/job:localhost/replica:0/task:0/device:CPU:0", output_num,
&run_comp_func_request);
CallOptions call_opts;
Notification n;
Status status;
eager_service_impl.RunComponentFunction(&call_opts, &run_comp_func_request,
&run_comp_func_response,
[&status, &n](const Status& s) {
status.Update(s);
n.Notify();
});
n.WaitForNotification();
TF_ASSERT_OK(status);
const tensorflow::Tensor* t = nullptr;
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
context_id, RemoteTensorHandleInternal(2, output_num), &tensor_handle));
TF_ASSERT_OK(tensor_handle->Tensor(&t));
auto actual = t->flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(7, actual(0));
EXPECT_EQ(10, actual(1));
EXPECT_EQ(15, actual(2));
EXPECT_EQ(22, actual(3));
}
{
RunComponentFunctionRequest run_comp_func_request;
run_comp_func_request.set_context_id(context_id);
RunComponentFunctionResponse run_comp_func_response;
const int output_num = 5;
AddOperationToRunComponentFunctionRequest(
3, "MatMulNestedTransposeFunction", {std::make_pair(1, 0)},
std::unordered_map<string, AttrValue>(),
"/job:localhost/replica:0/task:0/device:CPU:0", output_num,
&run_comp_func_request);
CallOptions call_opts;
Notification n;
Status status;
eager_service_impl.RunComponentFunction(&call_opts, &run_comp_func_request,
&run_comp_func_response,
[&status, &n](const Status& s) {
status.Update(s);
n.Notify();
});
n.WaitForNotification();
TF_ASSERT_OK(status);
const tensorflow::Tensor* t = nullptr;
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
context_id, RemoteTensorHandleInternal(3, output_num), &tensor_handle));
TF_ASSERT_OK(tensor_handle->Tensor(&t));
auto actual = t->flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(10, actual(0));
EXPECT_EQ(14, actual(1));
EXPECT_EQ(14, actual(2));
EXPECT_EQ(20, actual(3));
}
CloseContextRequest close_context_request;
close_context_request.set_context_id(context_id);
close_context_request.set_context_view_id(0);
  CloseContextResponse close_context_response;
  TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request,
                                               &close_context_response));
} |
1,298 | cpp | tensorflow/tensorflow | remote_mgr | tensorflow/core/distributed_runtime/eager/remote_mgr.cc | tensorflow/core/distributed_runtime/eager/remote_mgr_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_MGR_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_MGR_H_
#include <unordered_map>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/eager/eager_executor.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace eager {
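// RemoteMgr bookkeeps the TensorHandles that remote eager workers reference
// by (op_id, output_num) pairs, caches mirrored resource dtypes/shapes, and
// owns one EagerExecutor per stream. Only the master instance may mint new
// op ids via NextOpId(); handles held in the map are Unref'd on destruction.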
class RemoteMgr {
public:
RemoteMgr(bool is_master, EagerContext* ctx)
: is_master_(is_master), parent_(ctx) {}
~RemoteMgr() {
for (const auto& entry : remote_tensor_handle_map_) {
entry.second->Unref();
}
}
bool IsMaster() { return is_master_; }
void AddOperationOutputs(
const absl::Span<tensorflow::TensorHandle* const> handles,
int64_t operation_id);
void AddOperationOutput(tensorflow::TensorHandle* handles,
int64_t operation_id, int32_t output_num);
Status GetTensorHandle(const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle);
Status DeleteTensorHandle(const RemoteTensorHandleInternal& remote_handle);
uint64 NextOpId() {
DCHECK(is_master_);
mutex_lock l(next_id_mutex_);
return next_op_id_++;
}
Status SerializeRemoteTensorHandle(
TensorHandle* in, const bool wait_until_ready, RemoteTensorHandle* out,
Device* device, absl::string_view device_name = "",
const bool serialize_resource_dtype_and_shape = false);
Status DeserializeRemoteTensorHandle(const RemoteTensorHandle& in,
TensorHandle** out);
EagerExecutor& GetOrCreateExecutorForStream(uint64 stream_id);
void DeleteExecutorForStream(uint64 stream_id);
protected:
mutex next_id_mutex_;
uint64 next_op_id_ TF_GUARDED_BY(next_id_mutex_) = 1;
private:
Status GetRemoteTensorHandle(const tensorflow::TensorHandle* handle,
const bool wait_until_ready, int64_t* op_id,
int32* output_num)
TF_SHARED_LOCKS_REQUIRED(remote_tensor_handle_mu_);
Status GetTensorHandleImpl(const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle)
TF_SHARED_LOCKS_REQUIRED(remote_tensor_handle_mu_);
Status GetMirroredResourceShape(
const RemoteTensorHandleInternal& remote_handle,
std::vector<DtypeAndPartialTensorShape>* handle);
bool is_master_;
using RemoteTensorHandleMap =
gtl::FlatMap<RemoteTensorHandleInternal, tensorflow::TensorHandle*,
RemoteTensorHandleInternalHash,
RemoteTensorHandleInternalEquals>;
using MirroredResourceShapeMap = gtl::FlatMap<
RemoteTensorHandleInternal, std::vector<DtypeAndPartialTensorShape>,
RemoteTensorHandleInternalHash, RemoteTensorHandleInternalEquals>;
mutex remote_tensor_handle_mu_;
RemoteTensorHandleMap remote_tensor_handle_map_
TF_GUARDED_BY(remote_tensor_handle_mu_);
mutex mirrored_resource_shape_mu_;
MirroredResourceShapeMap mirrored_resource_shape_map_
TF_GUARDED_BY(mirrored_resource_shape_mu_);
EagerContext* parent_;
mutex executor_map_mu_;
std::unordered_map<uint64, EagerExecutor> executor_map_
TF_GUARDED_BY(executor_map_mu_);
};
}
}
#endif
#include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace {
Status WithErrorSourcePayload(Status error) {
core::platform::ErrorSourceProto error_source_proto;
error_source_proto.set_error_source(
core::platform::ErrorSourceProto::EAGER_REMOTE_MGR);
error.SetPayload(tensorflow::kErrorSource,
absl::Cord(error_source_proto.SerializeAsString()));
return error;
}
}
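// Every error surfaced by RemoteMgr below is routed through
// WithErrorSourcePayload, tagging it with an EAGER_REMOTE_MGR error-source
// payload so callers can attribute the failure to this component; the
// ErrorSourcesShouldExist case in the companion unit test checks exactly
// this payload.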
namespace eager {
void RemoteMgr::AddOperationOutputs(
const absl::Span<tensorflow::TensorHandle* const> handles,
int64_t operation_id) {
mutex_lock l(remote_tensor_handle_mu_);
for (int i = 0, end = handles.size(); i < end; i++) {
remote_tensor_handle_map_.emplace(
RemoteTensorHandleInternal(operation_id, i), handles[i]);
}
}
void RemoteMgr::AddOperationOutput(tensorflow::TensorHandle* handle,
int64_t operation_id, int32_t output_num) {
mutex_lock l(remote_tensor_handle_mu_);
remote_tensor_handle_map_.emplace(
RemoteTensorHandleInternal(operation_id, output_num), handle);
}
Status RemoteMgr::GetTensorHandleImpl(
const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle) {
auto iter = remote_tensor_handle_map_.find(remote_handle);
if (iter == remote_tensor_handle_map_.end()) {
std::string error_message = absl::StrCat(
"Unable to find the relevant tensor remote_handle: Op ID: ",
remote_handle.op_id, ", Output num: ", remote_handle.output_num,
". One possible cause is that the tensor was accessed after "
"deallocation in a distributed worker setup.");
bool result;
TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE",
true, &result));
if (result) {
std::string error_message_ext;
absl::StrAppend(
&error_message_ext, error_message,
"Try setting "
"`os.environ['TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE']='False'` in "
"your client to disable async streaming behavior to see if it fixes "
"the problem.");
return WithErrorSourcePayload(
absl::InvalidArgumentError(error_message_ext));
}
return WithErrorSourcePayload(absl::InvalidArgumentError(error_message));
}
*handle = iter->second;
return absl::OkStatus();
}
Status RemoteMgr::GetTensorHandle(
const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle) {
tf_shared_lock l(remote_tensor_handle_mu_);
return GetTensorHandleImpl(remote_handle, handle);
}
Status RemoteMgr::GetMirroredResourceShape(
const RemoteTensorHandleInternal& remote_handle,
std::vector<DtypeAndPartialTensorShape>* handle) {
tf_shared_lock l(mirrored_resource_shape_mu_);
auto iter = mirrored_resource_shape_map_.find(remote_handle);
if (iter == mirrored_resource_shape_map_.end()) {
return WithErrorSourcePayload(errors::InvalidArgument(
"Unable to find the relevant tensor remote_handle: Op ID: ",
remote_handle.op_id, ", Output num: ", remote_handle.output_num,
". One possible cause is that the tensor was accessed after "
"deallocation in a distributed worker setup. Try setting "
"`os.environ['TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE']='False'` in "
"your client to disable async streaming behavior to see if it fixes "
"the problem."));
}
*handle = iter->second;
return absl::OkStatus();
}
Status RemoteMgr::GetRemoteTensorHandle(const tensorflow::TensorHandle* handle,
const bool wait_until_ready,
int64_t* op_id, int32* output_num) {
TF_RETURN_IF_ERROR(handle->RemoteAddress(handle->device(), wait_until_ready,
op_id, output_num));
tensorflow::TensorHandle* h;
TF_RETURN_IF_ERROR(
GetTensorHandleImpl(RemoteTensorHandleInternal(*op_id, *output_num), &h));
if (handle != h) {
return WithErrorSourcePayload(errors::Internal(
"Found two different tensor handles with the same op_id:", *op_id,
" and output_num:", *output_num));
}
return absl::OkStatus();
}
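// Note the extra invariant enforced above: the (op_id, output_num) pair
// recovered from a handle must resolve back to that very same TensorHandle
// in remote_tensor_handle_map_; anything else means the bookkeeping diverged.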
Status RemoteMgr::DeleteTensorHandle(
const RemoteTensorHandleInternal& remote_handle) {
{
mutex_lock l(remote_tensor_handle_mu_);
auto iter = remote_tensor_handle_map_.find(remote_handle);
if (iter != remote_tensor_handle_map_.end()) {
iter->second->Unref();
remote_tensor_handle_map_.erase(iter);
return absl::OkStatus();
}
}
{
mutex_lock l(mirrored_resource_shape_mu_);
auto iter = mirrored_resource_shape_map_.find(remote_handle);
if (iter != mirrored_resource_shape_map_.end()) {
mirrored_resource_shape_map_.erase(iter);
return absl::OkStatus();
}
}
return WithErrorSourcePayload(errors::InvalidArgument(
"Unable to find the relevant tensor remote_handle: Op ID: ",
remote_handle.op_id, ", Output num: ", remote_handle.output_num));
}
Status RemoteMgr::SerializeRemoteTensorHandle(
TensorHandle* in, const bool wait_until_ready, RemoteTensorHandle* out,
Device* device, absl::string_view device_name,
const bool serialize_resource_dtype_and_shape) {
int64_t op_id;
int32_t output_num;
auto status =
in->RemoteAddress(device, wait_until_ready, &op_id, &output_num);
if (!status.ok()) {
LOG(ERROR)
<< "Failed to get remote address for tensor handle with given device "
<< device->name() << " error " << status.message();
tf_shared_lock l(remote_tensor_handle_mu_);
TF_RETURN_IF_ERROR(
GetRemoteTensorHandle(in, wait_until_ready, &op_id, &output_num));
}
out->Clear();
out->set_op_id(op_id);
out->set_output_num(output_num);
out->set_op_device(in->op_device() ? in->op_device()->name() : "");
out->set_device(device_name.empty()
? std::string(in->DeviceOrHostCPU(*parent_)->name())
: std::string(device_name));
out->set_dtype(in->dtype);
if (serialize_resource_dtype_and_shape) {
std::vector<DtypeAndPartialTensorShape> resource_dtypes_and_shapes;
TF_RETURN_IF_ERROR(
in->GetResourceHandleDtypesAndShapes(&resource_dtypes_and_shapes));
for (const auto& dtype_and_shape : resource_dtypes_and_shapes) {
ResourceDtypeAndShape* dtype_and_shape_proto =
out->add_resource_dtypes_and_shapes();
dtype_and_shape_proto->set_dtype(dtype_and_shape.dtype);
dtype_and_shape.shape.AsProto(dtype_and_shape_proto->mutable_shape());
}
}
return absl::OkStatus();
}
Status RemoteMgr::DeserializeRemoteTensorHandle(const RemoteTensorHandle& in,
TensorHandle** out) {
Device* device;
if (parent_->local_device_mgr()->LookupDevice(in.op_device(), &device).ok() ||
parent_->local_device_mgr()->LookupDevice(in.device(), &device).ok()) {
TF_RETURN_IF_ERROR(GetTensorHandle(RemoteTensorHandleInternal(in), out));
(*out)->Ref();
} else {
const string& device_name =
in.op_device().empty() ? in.device() : in.op_device();
TF_RETURN_IF_ERROR(
parent_->FindDeviceFromName(device_name.c_str(), &device));
*out = TensorHandle::CreateLazyRemoteHandle(in.op_id(), in.output_num(),
in.dtype(), device,
true, parent_);
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes;
if (!GetMirroredResourceShape(RemoteTensorHandleInternal(in),
&dtypes_and_shapes)
.ok()) {
for (const auto& dtype_and_shape_proto :
in.resource_dtypes_and_shapes()) {
dtypes_and_shapes.push_back(DtypeAndPartialTensorShape{
dtype_and_shape_proto.dtype(),
TensorShape(dtype_and_shape_proto.shape())});
}
mutex_lock l(mirrored_resource_shape_mu_);
mirrored_resource_shape_map_.emplace(
RemoteTensorHandleInternal(in.op_id(), in.output_num()),
dtypes_and_shapes);
}
(*out)->SetResourceHandleDtypeAndShape(std::move(dtypes_and_shapes));
}
return absl::OkStatus();
}
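// A hypothetical round-trip sketch (names and call sites assumed, not taken
// from this file): the producing side serializes, the consuming side
// deserializes.
//
//   RemoteTensorHandle proto;
//   TF_RETURN_IF_ERROR(remote_mgr->SerializeRemoteTensorHandle(
//       handle, /*wait_until_ready=*/true, &proto, handle->device()));
//   // ...ship `proto` across the wire; on the receiving RemoteMgr:
//   TensorHandle* restored = nullptr;
//   TF_RETURN_IF_ERROR(
//       remote_mgr->DeserializeRemoteTensorHandle(proto, &restored));
//   // `restored` arrives Ref'd (local hit) or as a fresh lazy handle;
//   // the caller owns the matching Unref.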
EagerExecutor& RemoteMgr::GetOrCreateExecutorForStream(uint64 stream_id) {
mutex_lock l(executor_map_mu_);
auto it = executor_map_.find(stream_id);
if (it == executor_map_.end()) {
auto it_and_bool = executor_map_.emplace(
std::piecewise_construct, std::forward_as_tuple(stream_id),
std::forward_as_tuple(true));
DCHECK(it_and_bool.second);
it = it_and_bool.first;
}
return it->second;
}
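// The per-stream EagerExecutor is constructed in place via piecewise_construct
// (it is not movable); the `true` constructor argument requests asynchronous
// execution for this stream's executor.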
void RemoteMgr::DeleteExecutorForStream(uint64 stream_id) {
mutex_lock l(executor_map_mu_);
auto it = executor_map_.find(stream_id);
if (it == executor_map_.end()) {
return;
}
Status s = it->second.ShutDown();
if (!s.ok()) {
LOG(ERROR) << "EagerExecutor shutdown with error " << s.message();
}
executor_map_.erase(it);
}
}
} | #include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/remote_tensor_handle.pb.h"
namespace tensorflow {
namespace eager {
namespace {
class TestRemoteMgr : public RemoteMgr {
public:
TestRemoteMgr(bool is_master, EagerContext* ctx)
: RemoteMgr(is_master, ctx) {}
uint64 OpId() {
tf_shared_lock l(next_id_mutex_);
return next_op_id_;
}
};
class RemoteMgrTest : public ::testing::Test {
public:
RemoteMgrTest() {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
local_device_ = devices.back().get();
devices.push_back(
DeviceFactory::NewDevice("CPU", {}, "/job:worker/replica:0/task:0"));
remote_device_ = devices.back().get();
auto device_mgr = std::make_unique<StaticDeviceMgr>(std::move(devices));
auto rendezvous = tsl::core::RefCountPtr<tensorflow::Rendezvous>(
new tensorflow::IntraProcessRendezvous(device_mgr.get()));
ctx_ = new tensorflow::EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, device_mgr.release(), true, std::move(rendezvous),
nullptr, nullptr, true);
}
~RemoteMgrTest() override { ctx_->Unref(); }
Device* local_device_;
Device* remote_device_;
EagerContext* ctx_;
};
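// The fixture fakes a two-worker layout with two local CPU devices under
// different job names ("/job:localhost" vs. "/job:worker"); remoteness is
// purely nominal here, and no RPC stack is involved.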
TEST_F(RemoteMgrTest, SerializeLocalTensorHandleWithRemoteMirror) {
RemoteMgr remote_mgr(false, ctx_);
const TensorShape shape({0});
Tensor t(DT_FLOAT, shape);
TensorHandle* handle = TensorHandle::CreateLocalHandle(
std::move(t), local_device_, local_device_, ctx_);
const uint64 op_id = 2;
const int output_num = 3;
TF_ASSERT_OK(handle->AddUnshapedRemoteMirror(remote_device_, op_id,
output_num, "", ctx_));
TF_ASSERT_OK(
handle->SetRemoteShape(shape, remote_device_, ctx_->GetContextViewId()));
RemoteTensorHandle remote_handle;
TF_ASSERT_OK(remote_mgr.SerializeRemoteTensorHandle(
handle, true, &remote_handle, remote_device_,
remote_device_->name()));
EXPECT_EQ(op_id, remote_handle.op_id());
EXPECT_EQ(output_num, remote_handle.output_num());
EXPECT_EQ(remote_device_->name(), remote_handle.device());
handle->Unref();
}
TEST_F(RemoteMgrTest, SerializeRemoteTensorHandle) {
RemoteMgr remote_mgr(false, ctx_);
const uint64 op_id = 3;
const int output_num = 1;
TensorHandle* handle = TensorHandle::CreateLazyRemoteHandle(
op_id, output_num, DT_FLOAT, remote_device_, true, ctx_);
RemoteTensorHandle remote_handle;
TF_ASSERT_OK(remote_mgr.SerializeRemoteTensorHandle(
handle, true, &remote_handle, remote_device_));
EXPECT_EQ(op_id, remote_handle.op_id());
EXPECT_EQ(output_num, remote_handle.output_num());
EXPECT_EQ(remote_device_->name(), remote_handle.device());
handle->Unref();
}
TEST_F(RemoteMgrTest, InvalidateRemoteMirrorWithClusterUpdate) {
RemoteMgr remote_mgr(false, ctx_);
Tensor t(DT_FLOAT, TensorShape({0}));
TensorHandle* handle = TensorHandle::CreateLocalHandle(
std::move(t), local_device_, local_device_, ctx_);
const uint64 op_id = 2;
const int output_num = 3;
TF_ASSERT_OK(handle->AddUnshapedRemoteMirror(remote_device_, op_id,
output_num, "", ctx_));
EXPECT_TRUE(
handle->HasRemoteMirror(remote_device_, ctx_->GetContextViewId()));
ctx_->IncrementContextViewId();
EXPECT_FALSE(
handle->HasRemoteMirror(remote_device_, ctx_->GetContextViewId()));
EXPECT_FALSE(handle
->SetRemoteShape(TensorShape({0}), remote_device_,
ctx_->GetContextViewId())
.ok());
handle->Unref();
}
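// Contrast with the mirror invalidation above: a handle created directly as
// an unshaped remote handle still accepts SetRemoteShape after the context
// view id is bumped, whereas remote mirrors become unusable.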
TEST_F(RemoteMgrTest, SetRemoteShapeWithClusterUpdate) {
RemoteMgr remote_mgr(false, ctx_);
const uint64 op_id = 3;
const int output_num = 1;
TensorHandle* handle = TensorHandle::CreateUnshapedRemoteHandle(
op_id, output_num,
"", DT_FLOAT, remote_device_, ctx_);
TF_ASSERT_OK(handle->SetRemoteShape(TensorShape({0}), remote_device_,
ctx_->GetContextViewId()));
handle->Unref();
handle = TensorHandle::CreateUnshapedRemoteHandle(
op_id, output_num,
"", DT_FLOAT, remote_device_, ctx_);
ctx_->IncrementContextViewId();
TF_ASSERT_OK(handle->SetRemoteShape(TensorShape({0}), remote_device_,
ctx_->GetContextViewId()));
handle->Unref();
}
TEST_F(RemoteMgrTest, ErrorSourcesShouldExist) {
RemoteMgr remote_mgr(false, ctx_);
const uint64 op_id = 3;
const int output_num = 1;
TensorHandle* handle = TensorHandle::CreateLazyRemoteHandle(
op_id, output_num, DT_FLOAT, remote_device_, true, ctx_);
RemoteTensorHandle remote_handle;
remote_mgr.AddOperationOutput(handle, op_id, output_num);
TF_ASSERT_OK(remote_mgr.SerializeRemoteTensorHandle(
handle, true, &remote_handle, remote_device_));
auto remote_handle_internal = RemoteTensorHandleInternal(remote_handle);
TF_ASSERT_OK(remote_mgr.DeleteTensorHandle(remote_handle_internal));
Status s = remote_mgr.DeleteTensorHandle(remote_handle_internal);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(s.GetPayload(kErrorSource).has_value());
TensorHandle* out;
s = remote_mgr.GetTensorHandle(remote_handle_internal, &out);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(s.GetPayload(kErrorSource).has_value());
s = remote_mgr.DeserializeRemoteTensorHandle(remote_handle, &out);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(s.GetPayload(kErrorSource).has_value());
}
}
}
} |
1,299 | cpp | tensorflow/tensorflow | grpc_session | tensorflow/core/distributed_runtime/rpc/grpc_session.cc | tensorflow/core/distributed_runtime/rpc/grpc_session_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_GRPC_SESSION_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_GRPC_SESSION_H_
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/master.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
class MasterInterface;
class GrpcSession : public Session {
protected:
explicit GrpcSession(const SessionOptions& options);
public:
static Status Create(const SessionOptions& options,
std::unique_ptr<GrpcSession>* out_session);
static Status Reset(const SessionOptions& options,
const std::vector<string>& containers);
~GrpcSession() override;
Status Create(const GraphDef& graph) override;
Status Create(const RunOptions& run_options, const GraphDef& graph) override;
Status Create(GraphDef&& graph) override;
Status Create(const RunOptions& run_options, GraphDef&& graph) override;
Status Run(const std::vector<std::pair<string, Tensor> >& inputs,
const std::vector<string>& output_tensor_names,
const std::vector<string>& target_node_names,
std::vector<Tensor>* outputs) override;
Status Run(const RunOptions& run_options,
const std::vector<std::pair<string, Tensor> >& inputs,
const std::vector<string>& output_tensor_names,
const std::vector<string>& target_node_names,
std::vector<Tensor>* outputs, RunMetadata* run_metadata) override;
Status Extend(const GraphDef& graph) override;
Status Extend(const RunOptions& run_options, const GraphDef& graph) override;
Status Extend(GraphDef&& graph) override;
Status Extend(const RunOptions& run_options, GraphDef&& graph) override;
Status Close() override;
Status PRunSetup(const std::vector<string>& input_names,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
string* handle) override;
Status PRun(const string& handle,
const std::vector<std::pair<string, Tensor> >& inputs,
const std::vector<string>& output_names,
std::vector<Tensor>* outputs) override;
Status ListDevices(std::vector<DeviceAttributes>* response) override;
Status MakeCallable(const CallableOptions& callable_options,
CallableHandle* out_handle) override;
Status RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata) override;
Status ReleaseCallable(CallableHandle handle) override;
protected:
void SetRemoteMaster(std::unique_ptr<MasterInterface> master);
void SetHandleAndGraphVersion(string handle, int64_t graph_version)
TF_LOCKS_EXCLUDED(mu_);
private:
const SessionOptions options_;
std::unique_ptr<MasterInterface> master_;
mutex mu_;
string handle_ TF_GUARDED_BY(mu_);
int64_t current_graph_version_ TF_GUARDED_BY(mu_);
bool is_local_ = false;
Status Handle(string* out_handle) TF_LOCKS_EXCLUDED(mu_);
Status RunHelper(const RunOptions& run_options,
const std::vector<std::pair<string, Tensor> >& inputs,
const std::vector<string>& output_tensor_names,
const std::vector<string>& target_node_names,
std::vector<Tensor>* outputs, RunMetadata* run_metadata,
const string& prun_handle);
Status RunProto(CallOptions* call_options, MutableRunStepRequestWrapper* req,
MutableRunStepResponseWrapper* resp);
Status CreateImpl(CallOptions* call_options, GraphDef graph);
Status ExtendImpl(CallOptions* call_options, GraphDef graph);
GrpcSession(const GrpcSession&) = delete;
void operator=(const GrpcSession&) = delete;
};
}
#endif
#include "tensorflow/core/distributed_runtime/rpc/grpc_session.h"
#include <unordered_map>
#include "tensorflow/core/common_runtime/session_factory.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/local_master.h"
#include "tensorflow/core/distributed_runtime/master_interface.h"
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_remote_master.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/protobuf/master.pb.h"
namespace tensorflow {
const char* const kSchemePrefix = "grpc://";
const size_t kSchemePrefixLength = strlen(kSchemePrefix);
GrpcSession::GrpcSession(const SessionOptions& options)
: options_(options), current_graph_version_(-1) {}
GrpcSession::~GrpcSession() {}
Status GrpcSession::Create(const SessionOptions& options,
std::unique_ptr<GrpcSession>* out_session) {
std::unique_ptr<GrpcSession> session(new GrpcSession(options));
std::unique_ptr<MasterInterface> master;
if (!options.config.rpc_options().use_rpc_for_inprocess_master()) {
master = LocalMaster::Lookup(options.target);
}
if (!master) {
SharedGrpcChannelPtr master_channel;
TF_RETURN_IF_ERROR(
NewHostPortGrpcChannel(options.target.substr(kSchemePrefixLength),
&options.config.rpc_options(), &master_channel));
master.reset(NewGrpcMaster(master_channel));
} else {
session->is_local_ = true;
}
session->SetRemoteMaster(std::move(master));
*out_session = std::move(session);
return absl::OkStatus();
}
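// When the target resolves to an in-process master (and RPC is not forced via
// use_rpc_for_inprocess_master), LocalMaster::Lookup bypasses gRPC entirely.
// is_local_ then also suppresses the request-id generation used below in
// PRunSetup/MakeCallable/RunCallable, where request ids guard retried RPCs
// against duplicate delivery.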
namespace {
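// ReEncodeConsts rewrites large Const attrs (more than 64 serialized bytes)
// whose payload sits in the repeated typed fields into the compact
// tensor_content byte encoding, shrinking the CreateSession request before
// CreateImpl ships the graph to the master.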
void ReEncodeConsts(GraphDef* gdef) {
for (NodeDef& ndef : *(gdef->mutable_node())) {
if (ndef.op() == "Const") {
TensorProto* proto = nullptr;
for (auto& attr : *ndef.mutable_attr()) {
if (attr.first == "value") {
proto = attr.second.mutable_tensor();
}
}
if (proto != nullptr && proto->tensor_content().empty() &&
proto->ByteSizeLong() > 64) {
Tensor parsed(proto->dtype());
if (parsed.FromProto(*proto)) {
parsed.AsProtoTensorContent(proto);
}
}
}
}
}
}
void GrpcSession::SetHandleAndGraphVersion(string handle,
int64_t graph_version) {
mutex_lock l(mu_);
handle_ = std::move(handle);
current_graph_version_ = graph_version;
}
Status GrpcSession::Handle(string* out_handle) {
mutex_lock l(mu_);
if (handle_.empty()) {
return errors::InvalidArgument("A session is not created yet....");
}
*out_handle = handle_;
return absl::OkStatus();
}
Status GrpcSession::CreateImpl(CallOptions* call_options, GraphDef graph) {
{
mutex_lock l(mu_);
if (!handle_.empty()) {
return errors::InvalidArgument("A session is alive.");
}
}
CreateSessionRequest req;
*req.mutable_config() = options_.config;
req.mutable_graph_def()->Swap(&graph);
req.set_target(options_.target);
ReEncodeConsts(req.mutable_graph_def());
CreateSessionResponse resp;
Status s = master_->CreateSession(call_options, &req, &resp);
if (s.ok()) {
SetHandleAndGraphVersion(resp.session_handle(), resp.graph_version());
}
return s;
}
Status GrpcSession::Create(const GraphDef& graph) {
return Create(GraphDef(graph));
}
Status GrpcSession::Create(const RunOptions& run_options,
const GraphDef& graph) {
return Create(run_options, GraphDef(graph));
}
Status GrpcSession::Create(GraphDef&& graph) {
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
return CreateImpl(&call_options, std::move(graph));
}
Status GrpcSession::Create(const RunOptions& run_options, GraphDef&& graph) {
CallOptions call_options;
call_options.SetTimeout(run_options.timeout_in_ms());
return CreateImpl(&call_options, std::move(graph));
}
Status GrpcSession::ExtendImpl(CallOptions* call_options, GraphDef graph) {
bool handle_is_empty;
{
mutex_lock l(mu_);
handle_is_empty = handle_.empty();
}
if (handle_is_empty) {
return Create(std::move(graph));
}
mutex_lock l(mu_);
ExtendSessionRequest req;
req.set_session_handle(handle_);
req.mutable_graph_def()->Swap(&graph);
req.set_current_graph_version(current_graph_version_);
ExtendSessionResponse resp;
Status s = master_->ExtendSession(call_options, &req, &resp);
if (s.ok()) {
current_graph_version_ = resp.new_graph_version();
}
return s;
}
Status GrpcSession::Extend(const GraphDef& graph) {
return Extend(GraphDef(graph));
}
Status GrpcSession::Extend(const RunOptions& run_options,
const GraphDef& graph) {
return Extend(run_options, GraphDef(graph));
}
Status GrpcSession::Extend(GraphDef&& graph) {
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
return ExtendImpl(&call_options, std::move(graph));
}
Status GrpcSession::Extend(const RunOptions& run_options, GraphDef&& graph) {
CallOptions call_options;
call_options.SetTimeout(run_options.timeout_in_ms());
return ExtendImpl(&call_options, std::move(graph));
}
Status GrpcSession::RunHelper(
const RunOptions& run_options,
const std::vector<std::pair<string, Tensor>>& inputs,
const std::vector<string>& output_tensor_names,
const std::vector<string>& target_node_names, std::vector<Tensor>* outputs,
RunMetadata* run_metadata, const string& prun_handle) {
std::unique_ptr<MutableRunStepRequestWrapper> req(
master_->CreateRunStepRequest());
std::unique_ptr<MutableRunStepResponseWrapper> resp(
master_->CreateRunStepResponse());
*req->mutable_options() = run_options;
if (run_options.timeout_in_ms() == 0) {
req->mutable_options()->set_timeout_in_ms(
options_.config.operation_timeout_in_ms());
}
if (!prun_handle.empty()) {
req->set_partial_run_handle(prun_handle);
}
for (const auto& it : inputs) {
req->add_feed(it.first, it.second);
}
req->set_store_errors_in_response_body(true);
std::unordered_map<string, int> output_name_to_offset;
for (int i = 0, end = output_tensor_names.size(); i < end; ++i) {
const string& name = output_tensor_names[i];
if (output_name_to_offset.insert(std::make_pair(name, i)).second) {
req->add_fetch(name);
}
}
for (const string& target : target_node_names) {
req->add_target(target);
}
CallOptions call_options;
call_options.SetTimeout(req->options().timeout_in_ms());
TF_RETURN_IF_ERROR(RunProto(&call_options, req.get(), resp.get()));
if (resp->status_code() != absl::StatusCode::kOk) {
return resp->status();
}
if (!output_tensor_names.empty()) {
outputs->resize(output_tensor_names.size());
}
for (size_t i = 0; i < resp->num_tensors(); ++i) {
auto fetch_it = output_name_to_offset.find(resp->tensor_name(i));
if (fetch_it == output_name_to_offset.end()) {
return errors::Internal("Received response for unrequested fetch: ",
resp->tensor_name(i));
}
Tensor output;
TF_RETURN_IF_ERROR(resp->TensorValue(i, &output));
(*outputs)[fetch_it->second] = output;
}
if (output_name_to_offset.size() != output_tensor_names.size()) {
for (int i = 0, end = output_tensor_names.size(); i < end; ++i) {
const string& name = output_tensor_names[i];
int offset = output_name_to_offset[name];
if (offset != i) {
(*outputs)[i] = (*outputs)[offset];
}
}
}
if (run_metadata) {
run_metadata->Swap(resp->mutable_metadata());
}
return absl::OkStatus();
}
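// RunHelper deduplicates repeated fetch names before issuing the request
// (only the first occurrence is sent) and then copies the returned tensor
// into every requested output slot, so callers may fetch the same name more
// than once; see the FetchMultipleTimes test in the companion test file.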
Status GrpcSession::Run(const RunOptions& run_options,
const std::vector<std::pair<string, Tensor>>& inputs,
const std::vector<string>& output_tensor_names,
const std::vector<string>& target_node_names,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) {
return RunHelper(run_options, inputs, output_tensor_names, target_node_names,
outputs, run_metadata, "");
}
Status GrpcSession::Run(const std::vector<std::pair<string, Tensor>>& inputs,
const std::vector<string>& output_tensor_names,
const std::vector<string>& target_node_names,
std::vector<Tensor>* outputs) {
RunOptions run_options;
run_options.set_timeout_in_ms(options_.config.operation_timeout_in_ms());
return Run(run_options, inputs, output_tensor_names, target_node_names,
outputs, nullptr);
}
Status GrpcSession::RunProto(CallOptions* call_options,
MutableRunStepRequestWrapper* req,
MutableRunStepResponseWrapper* resp) {
string handle;
TF_RETURN_IF_ERROR(Handle(&handle));
req->set_session_handle(handle);
return master_->RunStep(call_options, req, resp);
}
Status GrpcSession::PRunSetup(const std::vector<string>& input_names,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
string* handle) {
PartialRunSetupRequest req;
PartialRunSetupResponse resp;
CallOptions call_options;
TF_RETURN_IF_ERROR(Handle(req.mutable_session_handle()));
for (const string& feed : input_names) {
req.add_feed(feed);
}
for (const string& fetch : output_names) {
req.add_fetch(fetch);
}
for (const string& target : target_nodes) {
req.add_target(target);
}
if (!is_local_) req.set_request_id(GetUniqueRequestId());
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
TF_RETURN_IF_ERROR(master_->PartialRunSetup(&call_options, &req, &resp));
*handle = resp.partial_run_handle();
return absl::OkStatus();
}
Status GrpcSession::PRun(const string& handle,
const std::vector<std::pair<string, Tensor>>& inputs,
const std::vector<string>& output_names,
std::vector<Tensor>* outputs) {
RunOptions run_options;
run_options.set_timeout_in_ms(options_.config.operation_timeout_in_ms());
return RunHelper(run_options, inputs, output_names, {}, outputs,
nullptr, handle);
}
Status GrpcSession::Close() {
CloseSessionRequest req;
{
mutex_lock l(mu_);
if (handle_.empty()) {
return absl::OkStatus();
}
req.set_session_handle(handle_);
handle_.clear();
}
CloseSessionResponse resp;
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
return master_->CloseSession(&call_options, &req, &resp);
}
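// handle_ is cleared before the CloseSession RPC goes out, so calling Close()
// a second time is a harmless no-op rather than an error.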
Status GrpcSession::ListDevices(std::vector<DeviceAttributes>* response) {
ListDevicesRequest req;
{
mutex_lock l(mu_);
req.set_session_handle(handle_);
}
if (req.session_handle().empty()) {
LOG(WARNING) << "GrpcSession::ListDevices will initialize the session with "
"an empty graph and other defaults because the session has "
"not yet been created.";
GraphDef graph_def;
TF_RETURN_IF_ERROR(Create(graph_def));
{
mutex_lock l(mu_);
req.set_session_handle(handle_);
}
}
ListDevicesResponse resp;
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
Status s = master_->ListDevices(&call_options, &req, &resp);
if (!s.ok()) {
LOG(ERROR) << "Could not list devices: " << s;
return s;
}
response->clear();
response->reserve(resp.local_device_size() + resp.remote_device_size());
for (const auto& device_attr : resp.local_device()) {
response->emplace_back(device_attr);
}
for (const auto& device_attr : resp.remote_device()) {
response->emplace_back(device_attr);
}
return absl::OkStatus();
}
void GrpcSession::SetRemoteMaster(std::unique_ptr<MasterInterface> master) {
master_ = std::move(master);
}
Status GrpcSession::Reset(const SessionOptions& options,
const std::vector<string>& containers) {
SharedGrpcChannelPtr master_channel;
TF_RETURN_IF_ERROR(
NewHostPortGrpcChannel(options.target.substr(kSchemePrefixLength),
nullptr, &master_channel));
auto master = NewGrpcMaster(master_channel);
ResetRequest req;
req.mutable_container()->Reserve(containers.size());
for (const auto& c : containers) req.add_container(c);
ResetResponse resp;
CallOptions call_options;
call_options.SetTimeout(options.config.operation_timeout_in_ms());
Status ret = master->Reset(&call_options, &req, &resp);
delete master;
return ret;
}
Status GrpcSession::MakeCallable(const CallableOptions& callable_options,
CallableHandle* out_handle) {
MakeCallableRequest req;
TF_RETURN_IF_ERROR(Handle(req.mutable_session_handle()));
*req.mutable_options() = callable_options;
if (!is_local_) req.set_request_id(GetUniqueRequestId());
MakeCallableResponse resp;
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
TF_RETURN_IF_ERROR(master_->MakeCallable(&call_options, &req, &resp));
*out_handle = resp.handle();
return absl::OkStatus();
}
Status GrpcSession::RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata) {
RunCallableRequest req;
TF_RETURN_IF_ERROR(Handle(req.mutable_session_handle()));
req.set_handle(handle);
if (!is_local_) req.set_request_id(GetUniqueRequestId());
for (const Tensor& feed : feed_tensors) {
feed.AsProtoTensorContent(req.mutable_feed()->Add());
}
RunCallableResponse resp;
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
TF_RETURN_IF_ERROR(master_->RunCallable(&call_options, &req, &resp));
for (const TensorProto& fetch : resp.fetch()) {
Tensor fetch_tensor;
if (!fetch_tensor.FromProto(cpu_allocator(), fetch)) {
return errors::Internal(
"Could not parse fetched tensor data in response from master.");
}
fetch_tensors->push_back(std::move(fetch_tensor));
}
return absl::OkStatus();
}
Status GrpcSession::ReleaseCallable(CallableHandle handle) {
ReleaseCallableRequest req;
TF_RETURN_IF_ERROR(Handle(req.mutable_session_handle()));
req.set_handle(handle);
ReleaseCallableResponse resp;
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
return master_->ReleaseCallable(&call_options, &req, &resp);
}
class GrpcSessionFactory : public SessionFactory {
public:
bool AcceptsOptions(const SessionOptions& options) override {
return absl::StartsWith(options.target, kSchemePrefix);
}
Status NewSession(const SessionOptions& options,
Session** out_session) override {
std::unique_ptr<GrpcSession> session;
TF_RETURN_IF_ERROR(GrpcSession::Create(options, &session));
*out_session = session.release();
return absl::OkStatus();
}
Status Reset(const SessionOptions& options,
const std::vector<string>& containers) override {
return GrpcSession::Reset(options, containers);
}
};
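// Static registration: the registrar below runs at load time, wiring
// GrpcSessionFactory into SessionFactory so that NewSession() can service any
// target beginning with the "grpc://" scheme.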
class GrpcSessionRegistrar {
public:
GrpcSessionRegistrar() {
SessionFactory::Register("GRPC_SESSION", new GrpcSessionFactory());
}
};
static GrpcSessionRegistrar registrar;
} | #include "tensorflow/core/distributed_runtime/rpc/grpc_session.h"
#include <string>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/port.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
static SessionOptions Devices(int num_cpus, int num_gpus) {
SessionOptions result;
(*result.config.mutable_device_count())["CPU"] = num_cpus;
(*result.config.mutable_device_count())["GPU"] = num_gpus;
return result;
}
void CreateGraphDef(GraphDef* graph_def, string node_names[3]) {
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({1, 2}));
test::FillValues<float>(&a_tensor, {1, 2});
Node* a = test::graph::Constant(&graph, a_tensor);
node_names[0] = a->name();
Tensor b_tensor(DT_FLOAT, TensorShape({2, 1}));
test::FillValues<float>(&b_tensor, {2, 1});
Node* b = test::graph::Constant(&graph, b_tensor);
node_names[1] = b->name();
Node* c = test::graph::Matmul(&graph, a, b, false, false);
node_names[2] = c->name();
test::graph::ToGraphDef(&graph, graph_def);
}
static void IsSingleFloatValue(const Tensor& val, float expected_val) {
ASSERT_EQ(val.dtype(), DT_FLOAT);
ASSERT_EQ(val.NumElements(), 1);
ASSERT_EQ(val.flat<float>()(0), expected_val);
}
static SessionOptions Options(const string& target, int placement_period) {
SessionOptions options;
  options.target = strings::StrCat("grpc://", target);
options.config.set_isolate_session_state(false);
options.config.set_placement_period(placement_period);
options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
return options;
}
static Session* NewRemote(const SessionOptions& options) {
LOG(INFO) << "Connecting to " << options.target;
return CHECK_NOTNULL(NewSession(options));
}
using test::TestClusterConfig;
using test::TestJob;
TEST(GrpcSessionTest, BasicNonProtoAPI) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
ASSERT_TRUE(session != nullptr);
for (int iters = 0; iters < 25; ++iters) {
TF_ASSERT_OK(session->Create(graph));
{
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> targets = {node_names[2]};
TF_ASSERT_OK(session->Run(inputs, {}, targets, nullptr));
}
{
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> names = {node_names[2] + ":0"};
std::vector<string> targets = {node_names[1]};
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run(inputs, names, targets, &outputs));
ASSERT_TRUE(outputs[0].IsInitialized());
ASSERT_EQ(4.0, outputs[0].flat<float>()(0));
}
TF_ASSERT_OK(session->Close());
}
}
TEST(GrpcSessionTest, BasicCallable) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
ASSERT_TRUE(session != nullptr);
for (int iters = 0; iters < 25; ++iters) {
TF_ASSERT_OK(session->Create(graph));
{
CallableOptions opts;
opts.add_target(node_names[2]);
Session::CallableHandle handle;
TF_ASSERT_OK(session->MakeCallable(opts, &handle));
TF_ASSERT_OK(session->RunCallable(handle, {}, nullptr, nullptr));
TF_ASSERT_OK(session->ReleaseCallable(handle));
}
{
CallableOptions opts;
opts.add_target(node_names[1]);
opts.add_fetch(node_names[2] + ":0");
Session::CallableHandle handle;
TF_ASSERT_OK(session->MakeCallable(opts, &handle));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr));
ASSERT_EQ(1, outputs.size());
ASSERT_TRUE(outputs[0].IsInitialized());
ASSERT_EQ(4.0, outputs[0].flat<float>()(0));
TF_ASSERT_OK(session->ReleaseCallable(handle));
}
TF_ASSERT_OK(session->Close());
}
}
TEST(GrpcSessionTest, CallableWithOnDeviceFeedsAndFetches) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(graph));
std::vector<DeviceAttributes> devices;
TF_ASSERT_OK(session->ListDevices(&devices));
ASSERT_GT(devices.size(), 0);
const string device_name = devices.back().name();
CallableOptions opts;
const string fetch = node_names[2] + ":0";
opts.add_fetch(fetch);
opts.mutable_fetch_devices()->insert({fetch, device_name});
Session::CallableHandle handle;
Status status = session->MakeCallable(opts, &handle);
EXPECT_EQ(error::UNIMPLEMENTED, status.code());
TF_ASSERT_OK(session->Close());
}
TEST(GrpcSessionTest, BasicNonProtoAPIConsistentOrder) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
ASSERT_TRUE(session != nullptr);
ASSERT_TRUE(session->Create(graph).ok());
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> names = {node_names[2] + ":0", node_names[0] + ":0",
node_names[1] + ":0"};
std::vector<string> target_ops = {node_names[1]};
std::vector<Tensor> outputs;
ASSERT_TRUE(session->Run(inputs, names, target_ops, &outputs).ok());
ASSERT_TRUE(outputs[0].IsInitialized());
ASSERT_EQ(4.0, outputs[0].flat<float>()(0));
ASSERT_TRUE(outputs[1].IsInitialized());
ASSERT_EQ(1.0, outputs[1].flat<float>()(0));
ASSERT_TRUE(outputs[2].IsInitialized());
ASSERT_EQ(2.0, outputs[2].flat<float>()(0));
ASSERT_TRUE(session->Close().ok());
}
TEST(GrpcSessionTest, NonLocalWithFilters) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
SessionOptions options;
  options.target = strings::StrCat("grpc://", cluster->targets()[0]);
options.config.add_device_filters(cluster->devices()[0].name());
std::unique_ptr<Session> session(NewRemote(options));
ASSERT_TRUE(session != nullptr);
{
GraphDef graph_copy(graph);
graph::SetDefaultDevice(cluster->devices()[0].name(), &graph_copy);
TF_ASSERT_OK(session->Create(graph_copy));
TF_ASSERT_OK(session->Run({}, {}, {node_names[2]}, nullptr));
TF_ASSERT_OK(session->Close());
}
{
GraphDef graph_copy(graph);
graph::SetDefaultDevice(cluster->devices()[1].name(), &graph_copy);
auto status = session->Create(graph_copy);
EXPECT_EQ(absl::StatusCode::kInvalidArgument, status.code());
}
}
TEST(GrpcSessionTest, FetchMultipleTimes) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(graph));
const std::vector<std::pair<string, Tensor>> inputs;
std::vector<Tensor> outputs;
const string node = node_names[2] + ":0";
TF_ASSERT_OK(session->Run(inputs, {node, node}, {}, &outputs));
EXPECT_EQ(2, outputs.size());
for (int i = 0; i < outputs.size(); ++i) {
const Tensor& t = outputs[i];
ASSERT_TRUE(t.IsInitialized()) << i;
ASSERT_EQ(4.0, t.flat<float>()(0)) << i;
}
TF_ASSERT_OK(session->Close());
}
TEST(GrpcSessionTest, DisableOutputPartitionGraphs) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
SessionOptions options = Options(cluster->targets()[0], 1);
options.config.mutable_experimental()->set_disable_output_partition_graphs(
true);
std::unique_ptr<Session> session(NewRemote(options));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(graph));
{
TF_ASSERT_OK(session->Run({}, {}, {node_names[2]}, nullptr));
}
{
RunOptions run_options;
run_options.set_output_partition_graphs(true);
RunMetadata run_metadata;
Status s = session->Run(run_options, {}, {}, {node_names[2]}, nullptr,
&run_metadata);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(
absl::StrContains(s.message(), "disable_output_partition_graphs"));
}
TF_ASSERT_OK(session->Close());
}
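// FindMaxEigen below runs power iteration: it repeatedly applies
// A = [[3, 2], [-1, 0]] to a normalized vector, and the componentwise ratio
// `lambda` converges to the dominant eigenvalue. A's eigenvalues are 1 and 2
// (roots of lambda^2 - 3*lambda + 2 = 0), matching EXPECT_NEAR(2.0, ...).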
void FindMaxEigen(const string& target) {
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a_tensor, {3, 2, -1, 0});
Node* a = test::graph::Constant(&graph, a_tensor);
Tensor x_tensor(DT_FLOAT, TensorShape({2, 1}));
test::FillValues<float>(&x_tensor, {0, 0});
Node* x = test::graph::Constant(&graph, x_tensor);
Node* y = test::graph::Matmul(&graph, a, x, false, false);
Node* y2 = test::graph::Unary(&graph, "Square", y);
Tensor rdim_tensor(DT_INT32, TensorShape({}));
rdim_tensor.scalar<int32>()() = 0;
Node* rdim = test::graph::Constant(&graph, rdim_tensor);
Node* y2_sum = test::graph::Reduce(&graph, "Sum", y2, rdim);
Node* y_norm = test::graph::Unary(&graph, "Sqrt", y2_sum);
Node* y_normalized = test::graph::Binary(&graph, "Div", y, y_norm);
GraphDef def;
test::graph::ToGraphDef(&graph, &def);
std::unique_ptr<Session> session(NewRemote(Options(target, 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
float lambda;
Tensor feed_value(DT_FLOAT, TensorShape({2, 1}));
feed_value.matrix<float>()(0, 0) = -3.1415;
feed_value.matrix<float>()(1, 0) = +2.7183;
for (int i = 0; i < 25; ++i) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({{x->name(), feed_value}},
{y->name(), y_normalized->name()}, {}, &outputs));
const Tensor& y = outputs[0];
const Tensor& y_normalized = outputs[1];
CHECK_EQ(2, feed_value.NumElements());
CHECK_EQ(2, y.NumElements());
lambda = y.flat<float>()(0) / feed_value.flat<float>()(0);
printf("%06d lambda = %8.6f x = [%8.6f %8.6f] y = [%8.6f %8.6f]\n", i,
lambda, feed_value.flat<float>()(0), feed_value.flat<float>()(1),
y.flat<float>()(0), y.flat<float>()(1));
feed_value = y_normalized;
}
EXPECT_NEAR(2.0, lambda, 1e-6);
}
TEST(FindMaxEigenTest, RemoteDevice) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
FindMaxEigen(cluster->targets()[0]);
}
void SetDevice(GraphDef* graph, const string& name, const string& dev) {
for (int i = 0; i < graph->node_size(); ++i) {
if (graph->node(i).name() == name) {
graph->mutable_node(i)->set_device(dev);
return;
}
}
LOG(FATAL) << "Name '" << name << "' not found.";
}
TEST(GrpcSessionTest, DISABLED_MultiDevices) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
Graph graph(OpRegistry::Global());
const int kSize = 1048576;
Tensor a_tensor(DT_FLOAT, TensorShape({1, kSize}));
Tensor b_tensor(DT_FLOAT, TensorShape({kSize, 1}));
for (int i = 0; i < kSize; ++i) {
a_tensor.flat<float>()(i) = 2;
b_tensor.flat<float>()(i) = 3;
}
Node* a = test::graph::Constant(&graph, a_tensor);
Node* b = test::graph::Constant(&graph, b_tensor);
Node* c = test::graph::Matmul(&graph, a, b, false, false);
GraphDef def;
test::graph::ToGraphDef(&graph, &def);
for (const auto& a_dev : cluster->devices()) {
for (const auto& b_dev : cluster->devices()) {
for (const auto& c_dev : cluster->devices()) {
LOG(INFO) << "a: " << a_dev.name() << " b: " << b_dev.name()
<< " c: " << c_dev.name();
SetDevice(&def, a->name(), a_dev.name());
SetDevice(&def, b->name(), b_dev.name());
SetDevice(&def, c->name(), c_dev.name());
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1000)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
{
std::vector<Tensor> outputs;
RunOptions options;
options.set_trace_level(RunOptions::FULL_TRACE);
RunMetadata metadata;
TF_ASSERT_OK(
session->Run(options, {}, {c->name()}, {}, &outputs, &metadata));
ASSERT_EQ(1, outputs.size());
IsSingleFloatValue(outputs[0], 6.0 * kSize);
const StepStats& ss = metadata.step_stats();
bool c_placed_correctly = false;
for (const auto& dev : ss.dev_stats()) {
for (const auto& node : dev.node_stats()) {
if (node.node_name() == c->name() &&
dev.device() == c_dev.name()) {
c_placed_correctly = true;
}
}
}
ASSERT_TRUE(c_placed_correctly);
}
TF_ASSERT_OK(session->Close());
}
}
}
}
TEST(GrpcSessionTest, LargeTensorSend) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
Graph graph(OpRegistry::Global());
Tensor fill_shape_tensor(DT_INT32, TensorShape({4}));
fill_shape_tensor.vec<int32>()(0) = 1;
fill_shape_tensor.vec<int32>()(1) = 256;
fill_shape_tensor.vec<int32>()(2) = 1024;
fill_shape_tensor.vec<int32>()(3) = 1024;
Node* fill_shape_node = test::graph::Constant(&graph, fill_shape_tensor);
Tensor fill_val_tensor(DT_FLOAT, TensorShape({}));
fill_val_tensor.flat<float>()(0) = 1.0;
Node* fill_val_node = test::graph::Constant(&graph, fill_val_tensor);
Node* fill_node =
test::graph::Binary(&graph, "Fill", fill_shape_node, fill_val_node);
Tensor max_axes_tensor(DT_INT32, TensorShape({4}));
max_axes_tensor.vec<int32>()(0) = 0;
max_axes_tensor.vec<int32>()(1) = 1;
max_axes_tensor.vec<int32>()(2) = 2;
max_axes_tensor.vec<int32>()(3) = 3;
Node* max_axes_node = test::graph::Constant(&graph, max_axes_tensor);
Node* max_node = test::graph::Reduce(&graph, "Max", fill_node, max_axes_node);
GraphDef def;
test::graph::ToGraphDef(&graph, &def);
SetDevice(&def, fill_node->name(), cluster->devices()[0].name());
SetDevice(&def, fill_node->name(), cluster->devices()[1].name());
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1000)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
{
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {max_node->name()}, {}, &outputs));
ASSERT_EQ(1, outputs.size());
IsSingleFloatValue(outputs[0], 1.0);
}
TF_ASSERT_OK(session->Close());
}
TEST(GrpcSessionTest, MultiDevices_String) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 1))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1000)));
ASSERT_TRUE(session != nullptr);
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_STRING, TensorShape({2, 2}));
for (int i = 0; i < 4; ++i) {
a_tensor.flat<tstring>()(i) = "hello, world";
}
Node* a = test::graph::Constant(&graph, a_tensor);
Node* b = test::graph::Identity(&graph, a);
GraphDef def;
test::graph::ToGraphDef(&graph, &def);
for (const auto& a_dev : cluster->devices()) {
for (const auto& b_dev : cluster->devices()) {
LOG(INFO) << "a: " << a_dev.name() << " b: " << b_dev.name();
SetDevice(&def, a->name(), a_dev.name());
SetDevice(&def, b->name(), b_dev.name());
Status s = session->Create(def);
if (s.ok()) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {b->name()}, {}, &outputs));
ASSERT_EQ(1, outputs.size());
ASSERT_EQ(outputs[0].dtype(), DT_STRING);
ASSERT_EQ(outputs[0].NumElements(), 4);
for (int i = 0; i < outputs[0].NumElements(); ++i) {
EXPECT_EQ(outputs[0].flat<tstring>()(i), "hello, world");
}
TF_ASSERT_OK(session->Close());
} else {
LOG(ERROR) << "Error: " << s;
ASSERT_TRUE((a_dev.device_type() == DEVICE_GPU) ||
(b_dev.device_type() == DEVICE_GPU));
ASSERT_FALSE(s.ok());
}
}
}
}
TEST(GrpcSessionTest, SendRecv_Node_Naming) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 3}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
ASSERT_TRUE(session != nullptr);
CHECK_GE(cluster->devices().size(), 3);
const DeviceAttributes& src = cluster->devices()[0];
const DeviceAttributes& dst0 = cluster->devices()[1];
const DeviceAttributes& dst1 = cluster->devices()[2];
LOG(INFO) << "src = " << src.name() << " dst0 = " << dst0.name()
<< " dst1 = " << dst1.name();
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({1, 1}));
a_tensor.flat<float>()(0) = 100;
Node* a = test::graph::Constant(&graph, a_tensor);
Node* b = test::graph::Identity(&graph, a);
Node* c = test::graph::Identity(&graph, a);
GraphDef def;
test::graph::ToGraphDef(&graph, &def);
SetDevice(&def, a->name(), src.name());
SetDevice(&def, b->name(), dst0.name());
SetDevice(&def, c->name(), dst1.name());
TF_ASSERT_OK(session->Create(def));
{
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {b->name()}, {}, &outputs));
ASSERT_EQ(1, outputs.size());
IsSingleFloatValue(outputs[0], 100);
}
{
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {c->name()}, {}, &outputs));
ASSERT_EQ(1, outputs.size());
IsSingleFloatValue(outputs[0], 100);
}
TF_ASSERT_OK(session->Close());
}
TEST(GrpcSessionTest, Error) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
auto master = cluster->targets()[0];
const string& dev_a = cluster->devices()[0].name();
const string& dev_b = cluster->devices()[1].name();
LOG(INFO) << "master " << master << "dev_a " << dev_a << "dev_b " << dev_b;
GraphDef gdef;
std::vector<string> fetches;
{
Graph g(OpRegistry::Global());
auto a = test::graph::Constant(&g, Tensor());
a->set_assigned_device_name(dev_a);
auto a_err = test::graph::Error(&g, a, "fantasia!");
a_err->set_assigned_device_name(dev_a);
auto a2 = test::graph::Add(&g, a, a_err);
a2->set_assigned_device_name(dev_a);
fetches.push_back(a2->name());
auto b = test::graph::Constant(&g, Tensor());
b->set_assigned_device_name(dev_b);
auto b_delay = test::graph::Delay(&g, b, Microseconds(1000000));
b_delay->set_assigned_device_name(dev_b);
auto b2 = test::graph::Add(&g, b, b_delay);
b2->set_assigned_device_name(dev_b);
fetches.push_back(b2->name());
test::graph::ToGraphDef(&g, &gdef);
}
std::unique_ptr<Session> session(NewRemote(Options(master, 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(gdef));
{
Status status = session->Run({}, fetches, {}, nullptr);
EXPECT_FALSE(status.ok());
EXPECT_NE(status.ToString().find("fantasia!"), string::npos);
}
TF_ASSERT_OK(session->Close());
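// Give the delayed branch time to finish before the cluster is torn down.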
Env::Default()->SleepForMicroseconds(2000000);
}
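// Same shape as GrpcSessionTest.Error, but the error op is created with its
// log flag set, so the status text should also contain the logged
// "ErrorOp: fantasia!" line.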
TEST(GrpcSessionTest, ErrorStatusLog) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
auto master = cluster->targets()[0];
const string& dev_a = cluster->devices()[0].name();
const string& dev_b = cluster->devices()[1].name();
LOG(INFO) << "master " << master << "dev_a " << dev_a << "dev_b " << dev_b;
GraphDef gdef;
std::vector<string> fetches;
{
Graph g(OpRegistry::Global());
auto a = test::graph::Constant(&g, Tensor());
a->set_assigned_device_name(dev_a);
auto a_err = test::graph::Error(&g, a, "fantasia!", true);
a_err->set_assigned_device_name(dev_a);
auto a2 = test::graph::Add(&g, a, a_err);
a2->set_assigned_device_name(dev_a);
fetches.push_back(a2->name());
auto b = test::graph::Constant(&g, Tensor());
b->set_assigned_device_name(dev_b);
auto b_delay = test::graph::Delay(&g, b, Microseconds(1000000));
b_delay->set_assigned_device_name(dev_b);
auto b2 = test::graph::Add(&g, b, b_delay);
b2->set_assigned_device_name(dev_b);
fetches.push_back(b2->name());
g.ToGraphDef(&gdef);
}
std::unique_ptr<Session> session(NewRemote(Options(master, 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(gdef));
{
Status status = session->Run({}, fetches, {}, nullptr);
EXPECT_FALSE(status.ok());
std::cerr << status << "\n";
EXPECT_NE(status.ToString().find("fantasia!"), string::npos);
EXPECT_NE(status.ToString().find("ErrorOp: fantasia!"), string::npos);
}
TF_ASSERT_OK(session->Close());
Env::Default()->SleepForMicroseconds(2000000);
}
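// Embeds the error marker after ~1 MiB of padding to verify that a very long
// error message survives the gRPC round trip intact.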
TEST(GrpcSessionTest, LongErrorMessage) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
auto master = cluster->targets()[0];
const string& dev_a = cluster->devices()[0].name();
const string& dev_b = cluster->devices()[1].name();
LOG(INFO) << "master " << master << "dev_a " << dev_a << "dev_b " << dev_b;
GraphDef gdef;
std::vector<string> fetches;
{
Graph g(OpRegistry::Global());
auto a = test::graph::Constant(&g, Tensor());
a->set_assigned_device_name(dev_a);
std::vector<char> long_string_buffer(1024 * 1024, 'x');
StringPiece long_string(long_string_buffer.data(), 1024 * 1024);
string name = strings::StrCat(long_string, "fantasia!");
auto a_err = test::graph::Error(&g, a, name);
a_err->set_assigned_device_name(dev_a);
auto a2 = test::graph::Add(&g, a, a_err);
a2->set_assigned_device_name(dev_a);
fetches.push_back(a2->name());
auto b = test::graph::Constant(&g, Tensor());
b->set_assigned_device_name(dev_b);
auto b_delay = test::graph::Delay(&g, b, Microseconds(1000000));
b_delay->set_assigned_device_name(dev_b);
auto b2 = test::graph::Add(&g, b, b_delay);
b2->set_assigned_device_name(dev_b);
fetches.push_back(b2->name());
test::graph::ToGraphDef(&g, &gdef);
}
std::unique_ptr<Session> session(NewRemote(Options(master, 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(gdef));
{
Status status = session->Run({}, fetches, {}, nullptr);
EXPECT_FALSE(status.ok());
EXPECT_NE(status.ToString().find("fantasia!"), string::npos);
}
TF_ASSERT_OK(session->Close());
Env::Default()->SleepForMicroseconds(2000000);
}
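// A variable lives on the worker, not in any one session: one session
// initializes it, and later sessions alternately increment and read it,
// observing the accumulated value each time.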
TEST(SessionTest, SharedVar) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 1}}),
&cluster));
const string master = cluster->targets()[0];
CHECK_EQ(cluster->devices().size(), 1);
GraphDef gdef;
string init_name;
string inc_name;
string get_name;
{
Graph g(OpRegistry::Global());
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
Node* var = test::graph::Var(&g, DT_FLOAT, one.shape());
Node* init = test::graph::Assign(&g, var, test::graph::Constant(&g, one));
init_name = init->name();
Node* update = test::graph::Assign(
&g, var, test::graph::Add(&g, var, test::graph::Constant(&g, one)));
inc_name = update->name();
get_name = var->name();
test::graph::ToGraphDef(&g, &gdef);
}
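// Session 1: initialize the variable to 1.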
{
Session* sess = NewRemote(Options(master, 1));
TF_ASSERT_OK(sess->Create(gdef));
std::vector<std::pair<string, Tensor>> inp;
TF_ASSERT_OK(sess->Run(inp, {}, {init_name}, nullptr));
TF_ASSERT_OK(sess->Close());
delete sess;
}
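// Each iteration uses one fresh session to increment the variable and a
// second fresh session to read it back.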
for (int rep = 1; rep < 10; ++rep) {
{
Session* sess = NewRemote(Options(master, 1));
TF_ASSERT_OK(sess->Create(gdef));
std::vector<std::pair<string, Tensor>> inp;
TF_ASSERT_OK(sess->Run(inp, {}, {inc_name}, nullptr));
TF_ASSERT_OK(sess->Close());
delete sess;
}
{
Session* sess = NewRemote(Options(master, 1));
TF_ASSERT_OK(sess->Create(gdef));
std::vector<std::pair<string, Tensor>> inp;
std::vector<Tensor> ret;
TF_ASSERT_OK(sess->Run(inp, {get_name}, {}, &ret));
ASSERT_EQ(ret.size(), 1);
EXPECT_EQ(ret[0].scalar<float>()(), 1.0 * (1 + rep));
TF_ASSERT_OK(sess->Close());
delete sess;
}
}
}
TEST(SessionTest, SharedVarWithMultipleLearnerReplicas) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))