ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth)
---|---|---|---|---|---|---|---|
1,400 | cpp | tensorflow/tensorflow | remapper | tensorflow/core/grappler/optimizers/remapper.cc | tensorflow/core/grappler/optimizers/remapper_test.cc |
#ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_REMAPPER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_REMAPPER_H_
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
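// Remapper rewrites commonly occurring subgraphs (e.g. Conv2D/MatMul +
// BiasAdd [+ activation], FusedBatchNorm + Relu) into the fused kernels used
// below (_FusedConv2D, _FusedMatMul, _FusedBatchNormEx, ...).
// A minimal usage sketch, mirroring remapper_test.cc (assumes a populated
// GrapplerItem `item`):
//
//   Remapper optimizer(RewriterConfig::ON);
//   GraphDef optimized;
//   TF_CHECK_OK(optimizer.Optimize(/*cluster=*/nullptr, item, &optimized));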
class Remapper : public GraphOptimizer {
public:
explicit Remapper(RewriterConfig::Toggle opt_level,
RewriterConfig::CpuLayout cpu_layout_conversion =
RewriterConfig::NO_CONVERSION_ON_CPU,
bool xla_auto_clustering_on = false)
: opt_level_(opt_level),
cpu_layout_conversion_(cpu_layout_conversion),
xla_auto_clustering_on_(xla_auto_clustering_on) {}
~Remapper() override {}
string name() const override { return "remapper"; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
private:
RewriterConfig::Toggle opt_level_;
RewriterConfig::CpuLayout cpu_layout_conversion_;
bool xla_auto_clustering_on_;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/remapper.h"
#include <algorithm>
#include <cstdlib>
#include <map>
#include <set>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/grappler/utils/pattern_utils.h"
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/use_cudnn.h"
#include "tsl/platform/errors.h"
#ifdef INTEL_MKL
#include "tensorflow/core/util/mkl_heuristics.h"
#endif
#include "tensorflow/core/util/util.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cudnn/cudnn.h"
#endif
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kFusedConv2D[] = "_FusedConv2D";
constexpr char kFusedConv3D[] = "_FusedConv3D";
constexpr char kFusedMatMul[] = "_FusedMatMul";
constexpr char kFusedDepthwiseConv2dNative[] = "_FusedDepthwiseConv2dNative";
constexpr char kFusedBatchNormEx[] = "_FusedBatchNormEx";
constexpr char kFusedBatchNormGradEx[] = "_FusedBatchNormGradEx";
constexpr char kTensorToHashBucket[] = "_TensorToHashBucketFast";
constexpr char kLeakyRelu[] = "LeakyRelu";
constexpr char kMklFusedMish[] = "_MklFusedMish";
constexpr char kRelu[] = "Relu";
constexpr char kRelu6[] = "Relu6";
constexpr char kElu[] = "Elu";
constexpr char kDataFormat[] = "data_format";
constexpr char kIsTraining[] = "is_training";
constexpr char kWidth[] = "width";
constexpr char kFill[] = "fill";
constexpr int kMissingIndex = -1;
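// State shared across one remapper pass: the nodes that must be preserved,
// a mutable view of the graph being rewritten, and (lazily inferred) shape
// properties consulted by the pattern matchers below.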
struct RemapperContext {
explicit RemapperContext(GrapplerItem* item, Status* status,
RewriterConfig::CpuLayout cpu_layout_conversion,
bool xla_auto_clustering_on,
bool xla_cpu_jit_disable_fusion)
: nodes_to_preserve(item->NodesToPreserve()),
graph_view(&item->graph, status),
graph_properties(*item),
inferred_graph_properties(false),
cpu_layout_conversion(cpu_layout_conversion),
xla_auto_clustering_on(xla_auto_clustering_on),
xla_cpu_jit_disable_fusion(xla_cpu_jit_disable_fusion) {}
std::unordered_set<string> nodes_to_preserve;
utils::MutableGraphView graph_view;
GraphProperties graph_properties;
bool inferred_graph_properties;
RewriterConfig::CpuLayout cpu_layout_conversion;
bool xla_auto_clustering_on;
bool xla_cpu_jit_disable_fusion;
};
struct FusedBatchNorm {
FusedBatchNorm() = default;
explicit FusedBatchNorm(int fused_batch_norm)
: fused_batch_norm(fused_batch_norm) {}
int fused_batch_norm = kMissingIndex;
};
struct FusedBatchNormEx {
FusedBatchNormEx() = default;
int fused_batch_norm = kMissingIndex;
int side_input = kMissingIndex;
int activation = kMissingIndex;
int invalidated = kMissingIndex;
};
struct FusedBatchNormGradEx {
int fused_batch_norm_grad = kMissingIndex;
int activation_grad = kMissingIndex;
int side_input_grad = kMissingIndex;
int fwd_fused_batch_norm = kMissingIndex;
};
struct TensorToHashBucket {
TensorToHashBucket() = default;
explicit TensorToHashBucket(int op1, int op2, int op3)
: pre_as_string(op1), as_string(op2), string_to_hash_bucket(op3) {}
int pre_as_string = kMissingIndex;
int as_string = kMissingIndex;
int string_to_hash_bucket = kMissingIndex;
};
struct PadWithConv3D {
PadWithConv3D() = default;
PadWithConv3D(int contraction_idx, int pad_idx, int padding_const_idx)
: contraction_idx(contraction_idx),
pad_idx(pad_idx),
padding_const_idx(padding_const_idx) {}
int contraction_idx = kMissingIndex;
int pad_idx = kMissingIndex;
int padding_const_idx = kMissingIndex;
};
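// The Contraction* structs below describe matched subgraphs. Each field is the
// index of a node in the graph view (kMissingIndex when absent); bias_port is
// the input of the Add/BiasAdd that carries the bias operand.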
struct ContractionWithBiasAdd {
ContractionWithBiasAdd() = default;
ContractionWithBiasAdd(int contraction, int bias_add, int bias_port)
: contraction(contraction), bias_add(bias_add), bias_port(bias_port) {}
int contraction = kMissingIndex;
int bias_add = kMissingIndex;
int bias_port = 1;
};
struct ContractionWithActivation {
ContractionWithActivation() = default;
ContractionWithActivation(int contraction, int activation)
: contraction(contraction), activation(activation) {}
int contraction = kMissingIndex;
int activation = kMissingIndex;
};
struct ContractionWithBiasAddAndActivation {
ContractionWithBiasAddAndActivation() = default;
ContractionWithBiasAddAndActivation(int contraction, int bias_add,
int activation, int bias_port)
: contraction(contraction),
bias_add(bias_add),
activation(activation),
bias_port(bias_port) {}
int contraction = kMissingIndex;
int bias_add = kMissingIndex;
int activation = kMissingIndex;
int bias_port = 1;
};
struct ContractionWithSqueezeAndBiasAdd {
ContractionWithSqueezeAndBiasAdd() = default;
ContractionWithSqueezeAndBiasAdd(int contraction, int squeeze, int bias_add)
: contraction(contraction), squeeze(squeeze), bias_add(bias_add) {}
int contraction = kMissingIndex;
int squeeze = kMissingIndex;
int bias_add = kMissingIndex;
};
struct ContractionWithBatchNorm {
ContractionWithBatchNorm() = default;
ContractionWithBatchNorm(int contraction, int fused_batch_norm,
float epsilon = 0.0)
: contraction(contraction),
fused_batch_norm(fused_batch_norm),
epsilon(epsilon) {}
int contraction = kMissingIndex;
int fused_batch_norm = kMissingIndex;
float epsilon = 0.0;
};
struct ContractionWithBatchNormAndActivation {
ContractionWithBatchNormAndActivation() = default;
ContractionWithBatchNormAndActivation(int contraction, int fused_batch_norm,
int activation, float epsilon = 0.0)
: contraction(contraction),
fused_batch_norm(fused_batch_norm),
activation(activation),
epsilon(epsilon) {}
int contraction = kMissingIndex;
int fused_batch_norm = kMissingIndex;
int activation = kMissingIndex;
float epsilon = 0.0;
};
struct ContractionWithBiasAddAndAdd {
ContractionWithBiasAddAndAdd() = default;
ContractionWithBiasAddAndAdd(int contraction, int bias_add, int add,
int port_id, int bias_port)
: contraction(contraction),
bias_add(bias_add),
add(add),
port_id(port_id),
bias_port(bias_port) {}
int contraction = kMissingIndex;
int bias_add = kMissingIndex;
int add = kMissingIndex;
int port_id = 0;
int bias_port = 1;
};
struct ContractionWithBiasAndAddActivation {
ContractionWithBiasAndAddActivation() = default;
ContractionWithBiasAndAddActivation(int contraction, int bias_add, int add,
int port_id, int activation,
int bias_port)
: contraction(contraction),
bias_add(bias_add),
add(add),
port_id(port_id),
activation(activation),
bias_port(bias_port) {}
int contraction = kMissingIndex;
int bias_add = kMissingIndex;
int add = kMissingIndex;
int port_id = 0;
int activation = kMissingIndex;
int bias_port = 1;
};
bool IsInPreserveSet(const RemapperContext& ctx, const NodeDef* node) {
return ctx.nodes_to_preserve.count(node->name()) > 0;
}
bool HaveSameDataType(const NodeDef* lhs, const NodeDef* rhs,
const string& type_attr = "T") {
DataType lhs_attr = GetDataTypeFromAttr(*lhs, type_attr);
DataType rhs_attr = GetDataTypeFromAttr(*rhs, type_attr);
return lhs_attr != DT_INVALID && rhs_attr != DT_INVALID &&
lhs_attr == rhs_attr;
}
bool HasDataType(const NodeDef* node, const DataType& expected,
const string& type_attr = "T") {
DataType dtype = GetDataTypeFromAttr(*node, type_attr);
return dtype == expected;
}
bool IsCpuCompatibleDataType(const NodeDef* contraction,
const string& type_attr = "T") {
DataType dtype = GetDataTypeFromAttr(*contraction, type_attr);
bool is_one_dnn_enabled = IsMKLEnabled();
if (is_one_dnn_enabled) {
bool is_supported_matmul = false;
if (IsMatMul(*contraction)) {
is_supported_matmul = (dtype == DT_BFLOAT16)
? contraction->attr().contains("transpose_a") &&
!contraction->attr().at("transpose_a").b()
: true;
}
return ((IsConv2D(*contraction) || IsDepthwiseConv2dNative(*contraction) ||
IsConv3D(*contraction) || IsAnyBatchMatMul(*contraction) ||
is_supported_matmul) &&
IsDataTypeSupportedByOneDNNOnThisCPU(dtype));
}
if (IsConv2D(*contraction)) {
return dtype == DT_FLOAT || dtype == DT_DOUBLE;
} else if (IsMatMul(*contraction)) {
return dtype == DT_FLOAT;
} else {
return false;
}
}
bool IsGpuCompatibleDataType(const NodeDef* contraction,
const string& type_attr = "T") {
DataType dtype = GetDataTypeFromAttr(*contraction, type_attr);
if (IsConv2D(*contraction) || IsMatMul(*contraction)) {
return dtype == DT_FLOAT || dtype == DT_HALF;
} else {
return false;
}
}
bool IsCpuCompatibleDataFormat(const RemapperContext& ctx,
const NodeDef* conv_node) {
const string& data_format = conv_node->attr().at(kDataFormat).s();
if (IsConv2D(*conv_node)) {
return data_format == "NHWC" || (IsMKLEnabled() && data_format == "NCHW") ||
(ctx.cpu_layout_conversion == RewriterConfig::NHWC_TO_NCHW &&
data_format == "NCHW");
} else if (IsConv3D(*conv_node)) {
return data_format == "NDHWC" || (IsMKLEnabled() && data_format == "NCDHW");
} else {
return false;
}
}
bool BlasLtMatmulEnabled() {
static bool is_enabled = [] {
bool is_enabled = false;
TF_CHECK_OK(tensorflow::ReadBoolFromEnvVar(
"TF_USE_CUBLASLT", false, &is_enabled));
return is_enabled;
}();
return is_enabled;
}
bool IsGpuCompatibleDataFormat(const RemapperContext& ctx,
const NodeDef* conv2d) {
DCHECK(IsConv2D(*conv2d)) << "Expected Conv2D op";
const string& data_format = conv2d->attr().at(kDataFormat).s();
return data_format == "NHWC" || data_format == "NCHW";
}
bool IsCpuCompatibleConv2D(const RemapperContext& ctx, const NodeDef* conv2d) {
DCHECK(IsConv2D(*conv2d)) << "Expected Conv2D op";
return NodeIsOnCpu(conv2d) && IsCpuCompatibleDataType(conv2d) &&
IsCpuCompatibleDataFormat(ctx, conv2d);
}
bool IsCpuCompatibleConv3D(const RemapperContext& ctx, const NodeDef* conv3d) {
DCHECK(IsConv3D(*conv3d)) << "Expected Conv3D op";
return NodeIsOnCpu(conv3d) && IsCpuCompatibleDataType(conv3d) &&
IsCpuCompatibleDataFormat(ctx, conv3d);
}
bool IsGpuCompatibleConv2D(const RemapperContext& ctx, const NodeDef* conv2d,
const NodeDef* activation) {
DCHECK(IsConv2D(*conv2d)) << "Expected Conv2D op";
if (IsRelu(*activation)) {
return NodeIsOnGpu(conv2d) && IsGpuCompatibleDataType(conv2d) &&
IsGpuCompatibleDataFormat(ctx, conv2d);
} else if (IsRelu6(*activation) || IsElu(*activation) ||
IsLeakyRelu(*activation)) {
DataType dtype = GetDataTypeFromAttr(*conv2d, "T");
const string& data_format = conv2d->attr().at(kDataFormat).s();
return NodeIsOnGpu(conv2d) && dtype == DT_HALF && data_format == "NHWC";
}
return false;
}
bool IsGpuCompatibleMatMul(const RemapperContext& ctx, const NodeDef* matmul,
const NodeDef* activation) {
DCHECK(IsMatMul(*matmul)) << "Expected MatMul op";
if (activation == nullptr || IsRelu(*activation)) {
return BlasLtMatmulEnabled() && NodeIsOnGpu(matmul) &&
IsGpuCompatibleDataType(matmul);
} else if (IsTanh(*activation) || IsSigmoid(*activation)) {
DataType dtype = GetDataTypeFromAttr(*matmul, "T");
return NodeIsOnGpu(matmul) && dtype == DT_HALF;
}
return false;
}
bool IsCpuCompatibleMatMul(const RemapperContext& ctx, const NodeDef* matmul) {
DCHECK(IsMatMul(*matmul)) << "Expected MatMul op";
return NodeIsOnCpu(matmul) && IsCpuCompatibleDataType(matmul);
}
bool IsCpuCompatibleDepthwiseConv2dNative(const NodeDef* dw_conv2d) {
DCHECK(IsDepthwiseConv2dNative(*dw_conv2d))
<< "Expected DepthwiseConv2dNative op";
return NodeIsOnCpu(dw_conv2d) && IsCpuCompatibleDataType(dw_conv2d);
}
template <typename Pattern>
bool IsCpuCompatible(const RemapperContext& ctx, const Pattern& matched) {
if (ctx.xla_cpu_jit_disable_fusion) return false;
const NodeDef& node = ctx.graph_view.graph()->node(matched.contraction);
if (IsConv2D(node)) {
return IsCpuCompatibleConv2D(ctx, &node);
} else if (IsDepthwiseConv2dNative(node)) {
return (IsMKLEnabled() && IsCpuCompatibleDepthwiseConv2dNative(&node));
} else if (IsMatMul(node)) {
return IsCpuCompatibleMatMul(ctx, &node);
} else if (IsConv3D(node)) {
return (IsMKLEnabled() && IsCpuCompatibleConv3D(ctx, &node));
} else {
return false;
}
}
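// cuDNN runtime fusion is treated as available only when built with
// cuDNN >= 8.4, the cuDNN frontend is enabled, and every GPU in the cluster
// reports compute capability >= 8.0 (Ampere or newer).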
bool RuntimeFusionEnabled(const Cluster* cluster) {
static bool is_enabled = [&] {
#if CUDNN_VERSION >= 8400
if (!cluster) return false;
auto devices = cluster->GetDevices();
int num_gpus = 0;
int num_ampere = 0;
for (const auto& d : devices) {
if (d.second.type() == "GPU") {
num_gpus++;
auto cc_it = d.second.environment().find("architecture");
if (cc_it != d.second.environment().end()) {
double compute_capability = 0.0;
if (absl::SimpleAtod(cc_it->second, &compute_capability) &&
compute_capability >= 8.0) {
num_ampere++;
}
}
}
}
bool runtime_fusion_enabled = CudnnUseRuntimeFusion() &&
CudnnUseFrontend() && num_gpus > 0 &&
num_gpus == num_ampere;
if (CudnnUseRuntimeFusion() && !runtime_fusion_enabled) {
VLOG(1) << "Enabling Cudnn with runtime compilation requires the "
<< "Cudnn frontend and Ampere GPUs or later, but we got "
<< "Cudnn frontend is "
<< (CudnnUseFrontend() ? "enabled" : "disabled") << " and "
<< num_ampere << " Ampere GPU(s) out of total " << num_gpus
<< " GPU(s)";
}
return runtime_fusion_enabled;
#else
return false;
#endif
}();
return is_enabled;
}
bool IsSupportedActivation(const NodeDef& node, const Cluster* cluster) {
bool is_default_supported =
IsRelu(node) || IsRelu6(node) || IsElu(node) || IsLeakyRelu(node);
bool is_device_specific = (IsMKLEnabled() || RuntimeFusionEnabled(cluster)) &&
(IsTanh(node) || IsSigmoid(node));
return (is_default_supported || is_device_specific);
}
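// GPU eligibility for {Conv2D,MatMul} + BiasAdd + activation. Relu can always
// be fused; other activations rely on cuDNN runtime fusion, which additionally
// needs known non-1x1 spatial filter dims and even channel/inner dimensions.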
bool IsGpuCompatible(const RemapperContext& ctx,
const ContractionWithBiasAddAndActivation& matched,
const Cluster* cluster) {
#if TENSORFLOW_USE_ROCM
return false;
#endif
if (ctx.xla_auto_clustering_on) return false;
const GraphDef* graph = ctx.graph_view.graph();
const NodeDef& activation_node = graph->node(matched.activation);
if (!IsSupportedActivation(activation_node, cluster)) return false;
const NodeDef& contraction_node = graph->node(matched.contraction);
if (IsConv2D(contraction_node)) {
const std::vector<OpInfo::TensorProperties>& input_props =
ctx.graph_properties.GetInputProperties(contraction_node.name());
const TensorShapeProto& filter_shape =
input_props.size() >= 2 ? input_props[1].shape() : TensorShapeProto();
bool is_spatial_conv = Rank(filter_shape) == 4 &&
IsKnown(filter_shape.dim(0)) &&
IsKnown(filter_shape.dim(1)) &&
filter_shape.dim(0).size() != 1 &&
filter_shape.dim(1).size() != 1;
bool valid_channels = Rank(filter_shape) == 4 &&
IsKnown(filter_shape.dim(2)) &&
IsKnown(filter_shape.dim(3)) &&
filter_shape.dim(2).size() % 2 == 0 &&
filter_shape.dim(3).size() % 2 == 0;
return is_spatial_conv &&
(IsRelu(activation_node) ||
(RuntimeFusionEnabled(cluster) && valid_channels)) &&
IsGpuCompatibleConv2D(ctx, &contraction_node, &activation_node);
} else if (IsMatMul(contraction_node)) {
const std::vector<OpInfo::TensorProperties>& input_props =
ctx.graph_properties.GetInputProperties(contraction_node.name());
const TensorShapeProto& a_shape =
!input_props.empty() ? input_props[0].shape() : TensorShapeProto();
const TensorShapeProto& b_shape =
!input_props.empty() ? input_props[1].shape() : TensorShapeProto();
bool valid_dims = Rank(a_shape) == 2 && Rank(b_shape) == 2 &&
IsKnown(a_shape.dim(1)) &&
IsKnown(b_shape.dim(1)) &&
a_shape.dim(1).size() % 2 == 0 &&
b_shape.dim(1).size() % 2 == 0;
return (IsRelu(activation_node) ||
(RuntimeFusionEnabled(cluster) && valid_dims)) &&
IsGpuCompatibleMatMul(ctx, &contraction_node, &activation_node);
}
return false;
}
bool IsGpuCompatible(const RemapperContext& ctx,
const ContractionWithBiasAdd& matched,
const Cluster* cluster) {
#if TENSORFLOW_USE_ROCM && !TF_HIPBLASLT
return false;
#endif
if (ctx.xla_auto_clustering_on) return false;
const GraphDef* graph = ctx.graph_view.graph();
const NodeDef& contraction_node = graph->node(matched.contraction);
if (!IsMatMul(contraction_node)) return false;
return IsGpuCompatibleMatMul(ctx, &contraction_node, nullptr);
}
bool IsGpuCompatible(const RemapperContext& ctx,
const ContractionWithSqueezeAndBiasAdd& matched,
const Cluster* cluster) {
return false;
}
template <typename Pattern>
bool IsDeviceCompatible(const RemapperContext& ctx, Pattern& matched,
Cluster* cluster = nullptr) {
return IsCpuCompatible(ctx, matched) ||
IsGpuCompatible(ctx, matched, cluster);
}
std::string GetActivationName(std::string s) {
if (s == kMklFusedMish) {
return "Mish";
} else {
return s;
}
}
inline bool HasControlFaninOrFanout(const utils::MutableNodeView& node_view) {
return node_view.NumControllingFanins() > 0 ||
node_view.NumControlledFanouts() > 0;
}
inline bool HasAtMostOneFanoutAtPort0(const utils::MutableNodeView& node_view) {
return node_view.GetRegularFanout(0).size() <= 1;
}
inline bool HasAtMostOneDataFanoutAtPort0(
const utils::MutableNodeView& node_view) {
const auto predicate = [](const auto& fanout) -> bool {
const NodeDef* node = fanout.node_view()->node();
return !IsShape(*node) && !IsRank(*node);
};
return absl::c_count_if(node_view.GetRegularFanout(0), predicate) <= 1;
}
bool IsConvOrMatMul(const NodeDef& node) {
return IsConv2D(node) || IsDepthwiseConv2dNative(node) || IsMatMul(node) ||
IsConv3D(node);
}
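// Returns true if an Add can be treated as a BiasAdd (oneDNN CPU path only):
// one input comes from a channels-last contraction and the other broadcasts
// along the trailing (channel) dimension only. On success, bias_port is set to
// the input index that carries the bias operand.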
bool IsBiasSemanticAdd(const RemapperContext& ctx,
const utils::MutableNodeView& node_view,
int& bias_port) {
if (!IsMKLEnabled()) return false;
const auto* node_def = node_view.node();
if (!NodeIsOnCpu(node_def)) return false;
if (!IsAdd(*node_def) || node_view.NumRegularFanins() != 2) return false;
const auto& props = ctx.graph_properties.GetInputProperties(node_def->name());
if (props.size() < 2) return false;
const auto& regular_fanin_0 = node_view.GetRegularFanin(0);
const auto* node_view_0 = regular_fanin_0.node_view();
const auto* node_def_0 = node_view_0->node();
const auto& regular_fanin_1 = node_view.GetRegularFanin(1);
const auto* node_view_1 = regular_fanin_1.node_view();
const auto* node_def_1 = node_view_1->node();
if (!IsConvOrMatMul(*node_def_0) && !IsConvOrMatMul(*node_def_1))
return false;
auto is_channel_last_format = [](const NodeDef& node) -> bool {
if (node.attr().contains("data_format")) {
const string data_format = node.attr().at("data_format").s();
return (data_format == "NHWC" || data_format == "NDHWC");
}
return true;
};
if (!is_channel_last_format(*node_def_0) ||
!is_channel_last_format(*node_def_1))
return false;
const TensorShapeProto& prot0_shape = props[0].shape();
const TensorShapeProto& prot1_shape = props[1].shape();
if (prot0_shape.unknown_rank() || prot1_shape.unknown_rank() ||
prot0_shape.dim_size() < 1 || prot1_shape.dim_size() < 1 ||
!IsKnown(prot0_shape.dim(prot0_shape.dim_size() - 1)) ||
!IsKnown(prot1_shape.dim(prot1_shape.dim_size() - 1)))
return false;
const auto is_supported_shape =
[&](const TensorShapeProto& shape,
const TensorShapeProto& bcast_shape) -> bool {
int conv_channel_dim;
conv_channel_dim = shape.dim(shape.dim_size() - 1).size();
if (shape.dim_size() == 4 && bcast_shape.dim_size() > 4) return false;
if (shape.dim_size() == 5 && bcast_shape.dim_size() > 5) return false;
if (shape.dim_size() < 2) return false;
if (conv_channel_dim != bcast_shape.dim(bcast_shape.dim_size() - 1).size())
return false;
for (int i = 0; i < bcast_shape.dim_size() - 1; i++) {
if (1 != bcast_shape.dim(i).size()) return false;
}
return true;
};
if (ShapesSymbolicallyEqual(prot0_shape, prot1_shape) ||
!ShapesBroadcastable(prot0_shape, prot1_shape))
return false;
if (IsConvOrMatMul(*node_def_0)) {
bias_port = 1;
return (is_supported_shape(prot0_shape, prot1_shape));
} else if (IsConvOrMatMul(*node_def_1)) {
bias_port = 0;
return (is_supported_shape(prot1_shape, prot0_shape));
}
return false;
}
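// Records the node's inferred input shapes in an "_input_shapes" attribute
// (only when oneDNN is enabled and shape properties are available).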
void AddInputShapesAttr(const RemapperContext& ctx, int node_index) {
auto mutable_node = ctx.graph_view.graph()->mutable_node(node_index);
AttrValue attr_input_shape;
auto tensor_properties =
ctx.graph_properties.GetInputProperties(mutable_node->name());
for (const auto& tensor_property : tensor_properties) {
TensorShapeProto* proto = attr_input_shape.mutable_list()->add_shape();
*proto = tensor_property.shape();
}
if (IsMKLEnabled() && !tensor_properties.empty()) {
(*mutable_node->mutable_attr())["_input_shapes"] =
std::move(attr_input_shape);
}
}
bool FindContractionWithBias(const RemapperContext& ctx, int node_index,
ContractionWithBiasAdd* matched,
bool check_device_compatible = true) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
int bias_port = 1;
if (!IsBiasAdd(*node_def) && !IsBiasSemanticAdd(ctx, *node_view, bias_port))
return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(1 - bias_port);
const auto* contraction_node_view = regular_fanin_0.node_view();
const auto* contraction_node_def = contraction_node_view->node();
bool is_contraction = IsConv2D(*contraction_node_def) ||
(IsConv3D(*contraction_node_def) && IsMKLEnabled()) ||
IsMatMul(*contraction_node_def) ||
IsDepthwiseConv2dNative(*contraction_node_def);
#ifdef DNNL_AARCH64_USE_ACL
if (IsDepthwiseConv2dNative(*contraction_node_def)) is_contraction = false;
#endif
if (!is_contraction || !HaveSameDataType(node_def, contraction_node_def) || |
#include "tensorflow/core/grappler/optimizers/remapper.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/util.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cudnn/cudnn.h"
#endif
namespace tensorflow {
namespace grappler {
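// Test fixture for the remapper. SetUp() turns on the cuDNN persistent
// batch-norm kernels and the cuBLASLt matmul path via environment variables,
// so the GPU fusions exercised below become eligible.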
class RemapperTest : public GrapplerTest {
protected:
void SetUp() override {
setenv("TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT", "1", 1 );
setenv("TF_USE_CUBLASLT", "1", 1 );
}
};
TEST_F(RemapperTest, FusedBatchNorm) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output dflt = ops::Const(s.WithOpName("dflt"), {3.14f, 2.7f}, {2, 1, 1, 1});
Output x = ops::PlaceholderWithDefault(s.WithOpName("x"), dflt, {2, 1, 1, 1});
Output scale = ops::Const(s.WithOpName("scale"), {0.3f}, {1});
Output offset = ops::Const(s.WithOpName("offset"), {0.123f}, {1});
Output mean = ops::Const(s.WithOpName("mean"), {7.3f}, {1});
Output variance = ops::Const(s.WithOpName("variance"), {0.57f}, {1});
ops::FusedBatchNorm::Attrs attr;
attr = attr.IsTraining(false);
ops::FusedBatchNorm bn(s.WithOpName("batch_norm"), x, scale, offset, mean,
variance, attr);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
item.fetch = {"batch_norm"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(RemapperTest, FusedBatchNormNCHW) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output dflt =
ops::Const(s.WithOpName("dflt"), {3.14f, 2.7f, 1.0f, 2.0f, 3.0f, 100.0f},
{1, 3, 1, 2});
Output x = ops::PlaceholderWithDefault(s.WithOpName("x"), dflt, {1, 3, 1, 2});
Output scale = ops::Const(s.WithOpName("scale"), {0.3f, 7.0f, 123.0f}, {3});
Output offset =
ops::Const(s.WithOpName("offset"), {0.123f, 2.1f, 0.55f}, {3});
Output mean = ops::Const(s.WithOpName("mean"), {7.3f, 8.3f, 3.1f}, {3});
Output variance =
ops::Const(s.WithOpName("variance"), {0.57f, 1.0f, 2.0f}, {3});
ops::FusedBatchNorm::Attrs attr;
attr = attr.IsTraining(false);
attr = attr.DataFormat("NCHW");
ops::FusedBatchNorm bn(s.WithOpName("batch_norm").WithDevice("/device:GPU:0"),
x, scale, offset, mean, variance, attr);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
item.fetch = {"batch_norm"};
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-3);
}
}
TEST_F(RemapperTest, FuseBatchNormWithRelu) {
if (IsMKLEnabled()) GTEST_SKIP() << "Fusion not available with oneDNN.";
using ::tensorflow::ops::Placeholder;
for (bool is_training : {true, false}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
#if !defined(GOOGLE_CUDA) || !(CUDNN_VERSION >= 7402)
if (is_training) {
LOG(INFO) << "Skip FuseBatchNormWithRelu"
<< "[is_training=" << is_training << "] "
<< "test. It requires CUDNN_VERSION >= 7402.";
continue;
}
#endif
#if !defined(GOOGLE_CUDA)
if (!is_training) {
LOG(INFO) << "Skip FuseBatchNormWithRelu"
<< "[is_training=" << is_training << "]";
continue;
}
#endif
const int num_channels = 24;
TensorShape channel_shape({num_channels});
TensorShape empty_shape({0});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({2, 8, 8, num_channels}));
auto input_cast = ops::Cast(s.WithOpName("input_cast"), input, DT_HALF);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT);
auto var = Placeholder(s.WithOpName("var"), DT_FLOAT);
float epsilon = 0.1f;
auto fbn = ops::FusedBatchNormV3(
s.WithOpName("fused_batch_norm"), input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto relu = ops::Relu(s.WithOpName("relu"), fbn.y);
auto fetch = ops::Identity(s.WithOpName("fetch"), relu);
auto input_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
auto scale_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto offset_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto mean_t = GenerateRandomTensor<DT_FLOAT>(is_training ? empty_shape
: channel_shape);
auto var_t = GenerateRandomTensor<DT_FLOAT>(is_training ? empty_shape
: channel_shape);
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t},
{"scale", scale_t},
{"offset", offset_t},
{"mean", mean_t},
{"var", var_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "relu") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm");
found++;
}
if (node.name() == "fused_batch_norm") {
EXPECT_EQ(node.op(), "_FusedBatchNormEx");
ASSERT_EQ(node.input_size(), 5);
EXPECT_EQ(node.input(0), "input_cast");
EXPECT_EQ(node.input(1), "scale");
EXPECT_EQ(node.input(2), "offset");
EXPECT_EQ(node.input(3), "mean");
EXPECT_EQ(node.input(4), "var");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 0);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
}
EXPECT_EQ(found, 2);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
}
}
}
#if defined(GOOGLE_CUDA) && CUDNN_VERSION >= 7402
TEST_F(RemapperTest, FuseBatchNormGradWithReluGrad) {
if (IsMKLEnabled()) GTEST_SKIP() << "Fusion not available with oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
bool is_training = true;
const int num_channels = 24;
TensorShape channel_shape({num_channels});
TensorShape empty_shape({0});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({2, 8, 8, num_channels}));
auto input_cast = ops::Cast(s.WithOpName("input_cast"), input, DT_HALF);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT);
auto var = Placeholder(s.WithOpName("var"), DT_FLOAT);
float epsilon = 0.1f;
auto fbn = ops::FusedBatchNormV3(
s.WithOpName("fused_batch_norm"), input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto relu = ops::Relu(s.WithOpName("relu"), fbn.y);
auto output_grad =
Placeholder(s.WithOpName("output_grad"), DT_FLOAT,
ops::Placeholder::Shape({2, 8, 8, num_channels}));
auto output_grad_cast =
ops::Cast(s.WithOpName("output_grad_cast"), output_grad, DT_HALF);
auto relu_grad = ops::internal::ReluGrad(s.WithOpName("relu_grad"),
output_grad_cast, relu);
auto fbn_grad = ops::FusedBatchNormGradV3(
s.WithOpName("fused_batch_norm_grad"), relu_grad, input_cast, scale,
fbn.reserve_space_1, fbn.reserve_space_2, fbn.reserve_space_3,
ops::FusedBatchNormGradV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto fetch0 = ops::Identity(s.WithOpName("fetch0"), fbn_grad.x_backprop);
auto fetch1 = ops::Identity(s.WithOpName("fetch1"), fbn_grad.scale_backprop);
auto fetch2 = ops::Identity(s.WithOpName("fetch2"), fbn_grad.offset_backprop);
auto input_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
auto scale_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto offset_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto mean_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto var_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto output_grad_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
GrapplerItem item;
item.fetch = {"fetch0", "fetch1", "fetch2"};
item.feed = {{"input", input_t}, {"scale", scale_t},
{"offset", offset_t}, {"mean", mean_t},
{"var", var_t}, {"output_grad", output_grad_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "relu") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm");
found++;
}
if (node.name() == "fused_batch_norm") {
EXPECT_EQ(node.op(), "_FusedBatchNormEx");
ASSERT_EQ(node.input_size(), 5);
EXPECT_EQ(node.input(0), "input_cast");
EXPECT_EQ(node.input(1), "scale");
EXPECT_EQ(node.input(2), "offset");
EXPECT_EQ(node.input(3), "mean");
EXPECT_EQ(node.input(4), "var");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 0);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
if (node.name() == "fused_batch_norm_grad") {
EXPECT_EQ(node.op(), "_FusedBatchNormGradEx");
ASSERT_EQ(node.input_size(), 8);
EXPECT_EQ(node.input(0), "output_grad_cast");
EXPECT_EQ(node.input(1), "input_cast");
EXPECT_EQ(node.input(2), "scale");
EXPECT_EQ(node.input(3), "fused_batch_norm:3");
EXPECT_EQ(node.input(4), "fused_batch_norm:4");
EXPECT_EQ(node.input(5), "fused_batch_norm:5");
EXPECT_EQ(node.input(6), "offset");
EXPECT_EQ(node.input(7), "relu");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 0);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
}
EXPECT_EQ(found, 3);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 3);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 3);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
test::ExpectClose(tensors[1], tensors_expected[1], 1e-2, 1e-2);
test::ExpectClose(tensors[2], tensors_expected[2], 1e-2, 1e-2);
}
}
#endif
TEST_F(RemapperTest, FuseBatchNormWithAddAndRelu) {
if (IsMKLEnabled()) GTEST_SKIP() << "Fusion not available with oneDNN.";
using ::tensorflow::ops::Placeholder;
for (bool is_training : {true, false}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
#if !defined(GOOGLE_CUDA) || !(CUDNN_VERSION >= 7402)
if (is_training) {
LOG(INFO) << "Skip FuseBatchNormWithAddAndRelu"
<< "[is_training=" << is_training << "] "
<< "test. It requires CUDNN_VERSION >= 7402.";
continue;
}
#endif
#if !defined(GOOGLE_CUDA)
if (!is_training) {
LOG(INFO) << "Skip FuseBatchNormWithAddAndRelu"
<< "[is_training=" << is_training << "]";
continue;
}
#endif
const int num_channels = 24;
TensorShape input_shape({2, 8, 8, num_channels});
TensorShape channel_shape({num_channels});
TensorShape empty_shape({0});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto input_cast = ops::Cast(s.WithOpName("input_cast"), input, DT_HALF);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT);
auto var = Placeholder(s.WithOpName("var"), DT_FLOAT);
auto side_input = Placeholder(s.WithOpName("side_input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto side_input_cast =
ops::Cast(s.WithOpName("side_input_cast"), side_input, DT_HALF);
float epsilon = 0.1f;
auto fbn = ops::FusedBatchNormV3(
s.WithOpName("fused_batch_norm"), input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto add = ops::Add(s.WithOpName("add"), fbn.y, side_input_cast);
auto relu = ops::Relu(s.WithOpName("relu"), add);
auto fetch = ops::Identity(s.WithOpName("fetch"), relu);
auto input_t = GenerateRandomTensor<DT_FLOAT>(input_shape);
auto scale_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto offset_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto mean_t = GenerateRandomTensor<DT_FLOAT>(is_training ? empty_shape
: channel_shape);
auto var_t = GenerateRandomTensor<DT_FLOAT>(is_training ? empty_shape
: channel_shape);
auto side_input_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"scale", scale_t},
{"offset", offset_t}, {"mean", mean_t},
{"var", var_t}, {"side_input", side_input_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "relu") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm");
found++;
}
if (node.name() == "fused_batch_norm") {
EXPECT_EQ(node.op(), "_FusedBatchNormEx");
ASSERT_EQ(node.input_size(), 6);
EXPECT_EQ(node.input(0), "input_cast");
EXPECT_EQ(node.input(1), "scale");
EXPECT_EQ(node.input(2), "offset");
EXPECT_EQ(node.input(3), "mean");
EXPECT_EQ(node.input(4), "var");
EXPECT_EQ(node.input(5), "side_input_cast");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 1);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
}
EXPECT_EQ(found, 2);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
}
}
}
#if defined(GOOGLE_CUDA) && CUDNN_VERSION >= 7402
TEST_F(RemapperTest, FuseBatchNormGradWithAddAndReluGrad) {
if (IsMKLEnabled()) GTEST_SKIP() << "Fusion not available with oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
bool is_training = true;
const int num_channels = 24;
TensorShape input_shape({2, 8, 8, num_channels});
TensorShape channel_shape({num_channels});
TensorShape empty_shape({0});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto input_cast = ops::Cast(s.WithOpName("input_cast"), input, DT_HALF);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT);
auto var = Placeholder(s.WithOpName("var"), DT_FLOAT);
auto side_input = Placeholder(s.WithOpName("side_input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto side_input_cast =
ops::Cast(s.WithOpName("side_input_cast"), side_input, DT_HALF);
float epsilon = 0.1f;
auto fbn = ops::FusedBatchNormV3(
s.WithOpName("fused_batch_norm"), input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto fbn_side_input =
ops::FusedBatchNormV3(s.WithOpName("fused_batch_norm_side_input"),
side_input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto add = ops::Add(s.WithOpName("add"), fbn.y, fbn_side_input.y);
auto relu = ops::Relu(s.WithOpName("relu"), add);
auto output_grad =
Placeholder(s.WithOpName("output_grad"), DT_FLOAT,
ops::Placeholder::Shape({2, 8, 8, num_channels}));
auto output_grad_cast =
ops::Cast(s.WithOpName("output_grad_cast"), output_grad, DT_HALF);
auto relu_grad = ops::internal::ReluGrad(s.WithOpName("relu_grad"),
output_grad_cast, relu);
auto fbn_grad = ops::FusedBatchNormGradV3(
s.WithOpName("fused_batch_norm_grad"), relu_grad, input_cast, scale,
fbn.reserve_space_1, fbn.reserve_space_2, fbn.reserve_space_3,
ops::FusedBatchNormGradV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto fbn_side_input_grad = ops::FusedBatchNormGradV3(
s.WithOpName("fused_batch_norm_side_input_grad"), relu_grad,
side_input_cast, scale, fbn_side_input.reserve_space_1,
fbn_side_input.reserve_space_2, fbn_side_input.reserve_space_3,
ops::FusedBatchNormGradV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto fetch0 = ops::Identity(s.WithOpName("fetch0"), fbn_grad.x_backprop);
auto fetch1 = ops::Identity(s.WithOpName("fetch1"), fbn_grad.scale_backprop);
auto fetch2 = ops::Identity(s.WithOpName("fetch2"), fbn_grad.offset_backprop);
auto fetch3 =
ops::Identity(s.WithOpName("fetch3"), fbn_side_input_grad.x_backprop);
auto fetch4 =
ops::Identity(s.WithOpName("fetch4"), fbn_side_input_grad.scale_backprop);
auto fetch5 = ops::Identity(s.WithOpName("fetch5"),
fbn_side_input_grad.offset_backprop);
auto input_t = GenerateRandomTensor<DT_FLOAT>(input_shape);
auto scale_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto offset_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto mean_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto var_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto side_input_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
auto output_grad_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
GrapplerItem item;
item.fetch = {"fetch0", "fetch1", "fetch2", "fetch3", "fetch4", "fetch5"};
item.feed = {{"input", input_t},
{"scale", scale_t},
{"offset", offset_t},
{"mean", mean_t},
{"var", var_t},
{"side_input", side_input_t},
{"output_grad", output_grad_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "relu") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm");
found++;
}
if (node.name() == "fused_batch_norm") {
EXPECT_EQ(node.op(), "_FusedBatchNormEx");
ASSERT_EQ(node.input_size(), 6);
EXPECT_EQ(node.input(0), "input_cast");
EXPECT_EQ(node.input(1), "scale");
EXPECT_EQ(node.input(2), "offset");
EXPECT_EQ(node.input(3), "mean");
EXPECT_EQ(node.input(4), "var");
EXPECT_EQ(node.input(5), "fused_batch_norm_side_input");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 1);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
if (node.name() == "relu_grad") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm_grad:5");
found++;
}
if (node.name() == "fused_batch_norm_grad") {
EXPECT_EQ(node.op(), "_FusedBatchNormGradEx");
ASSERT_EQ(node.input_size(), 8);
EXPECT_EQ(node.input(0), "output_grad_cast");
EXPECT_EQ(node.input(1), "input_cast");
EXPECT_EQ(node.input(2), "scale");
EXPECT_EQ(node.input(3), "fused_batch_norm:3");
EXPECT_EQ(node.input(4), "fused_batch_norm:4");
EXPECT_EQ(node.input(5), "fused_batch_norm:5");
EXPECT_EQ(node.input(6), "offset");
EXPECT_EQ(node.input(7), "relu");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 1);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
}
EXPECT_EQ(found, 4);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 6);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 6);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
test::ExpectClose(tensors[1], tensors_expected[1], 1e-2, 1e-2);
test::ExpectClose(tensors[2], tensors_expected[2], 1e-2, 1e-2);
test::ExpectClose(tensors[3], tensors_expected[3], 1e-2, 1e-2);
test::ExpectClose(tensors[4], tensors_expected[4], 1e-2, 1e-2);
test::ExpectClose(tensors[5], tensors_expected[5], 1e-2, 1e-2);
}
}
#endif
class RemapperFuseConvWithBias : public RemapperTest {
public:
template <int dim, DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
std::vector<int> strides = {1, 1, 1, 1};
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
if (dim == 3) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
bias_shape = ops::Placeholder::Shape({128});
strides = {1, 1, 1, 1, 1};
input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 32, 3});
filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 128});
bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
}
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
if (dim == 2) {
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
} else if (dim == 3) {
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
}
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "bias_add") {
if (dim == 2) {
EXPECT_EQ(node.op(), "_FusedConv2D");
} else if (dim == 3) {
EXPECT_EQ(node.op(), "_FusedConv3D");
}
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
};
TEST_F(RemapperFuseConvWithBias, Conv2D_F32) { RunTest<2, DT_FLOAT>(); }
TEST_F(RemapperFuseConvWithBias, Conv3D_F32) { RunTest<3, DT_FLOAT>(); }
TEST_F(RemapperFuseConvWithBias, Conv2D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConv2DWithBias with bfloat16.";
RunTest<2, DT_BFLOAT16>();
}
TEST_F(RemapperFuseConvWithBias, Conv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConv3DWithBias with bfloat16.";
RunTest<3, DT_BFLOAT16>();
}
class RemapperFuseConvWithBiasAndActivation : public RemapperTest {
public:
template <int dim, DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
for (const string& activation : {"Relu", "Relu6", "Elu", "LeakyRelu"}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = Placeholder::Shape({128});
std::vector<int> strides = {1, 1, 1, 1};
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
if (dim == 3) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
input_shape = Placeholder::Shape({8, 4, 32, 32, 3});
filter_shape = Placeholder::Shape({1, 1, 1, 3, 128});
bias_shape = Placeholder::Shape({128});
strides = {1, 1, 1, 1, 1};
input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 32, 3});
filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 128});
bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
}
float leakyrelu_alpha = 0.5;
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
if (dim == 2) {
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, bias_add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, bias_add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, bias_add));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}
return ops::Identity(fetch, bias);
}();
} else if (dim == 3) {
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName( |
1,401 | cpp | tensorflow/tensorflow | batch_op_rewriter | tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.cc | tensorflow/core/grappler/optimizers/inference/batch_op_rewriter_test.cc |
#ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_INFERENCE_BATCH_OP_REWRITER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_INFERENCE_BATCH_OP_REWRITER_H_
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.pb.h"
namespace tensorflow {
namespace grappler {
constexpr char kEnableAdaptiveSchedulerAttr[] = "_enable_adaptive_scheduler";
constexpr char kMinInflightBatchesAttr[] = "_min_inflight_batches";
constexpr char kInitialInflightBatchesAttr[] = "_initial_inflight_batches";
constexpr char kMaxInflightBatchesAttr[] = "_max_inflight_batches";
constexpr char kBatchesToAverageOverAttr[] = "_batches_to_average_over";
constexpr char kFullBatchSchedulingBoostMicros[] =
"_full_batch_scheduling_boost_micros";
constexpr int64_t kMinInflightBatches = 16;
constexpr int64_t kInitialInflightBatches = 16;
constexpr int64_t kBatchesToAverageOver = 10;
constexpr int64_t kMaxInflightBatches = 64;
using ::tensorflow::serving::BatchOpRewriteConfig;
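// Grappler optimizer that rewrites BatchFunction nodes (batching attributes,
// adaptive shared batching, adaptive scheduler parameters) according to a
// BatchOpRewriteConfig. The config is passed base64-encoded under the
// "batch_op_rewrite_config" key of the custom optimizer's parameter_map.
// A minimal wiring sketch (assumes a populated BatchOpRewriteConfig
// `rewrite_config`):
//
//   RewriterConfig_CustomGraphOptimizer optimizer_config;
//   optimizer_config.set_name("batch_op_rewrite");
//   std::string serialized, encoded;
//   rewrite_config.SerializeToString(&serialized);
//   absl::Base64Escape(serialized, &encoded);
//   (*optimizer_config.mutable_parameter_map())["batch_op_rewrite_config"]
//       .set_s(encoded);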
class BatchOpRewriter : public ::tensorflow::grappler::CustomGraphOptimizer {
public:
::tensorflow::Status Init(
const ::tensorflow::RewriterConfig_CustomGraphOptimizer* config) override;
std::string name() const override { return "batch_op_rewriter"; }
bool UsesFunctionLibrary() const override { return false; }
::tensorflow::Status Optimize(
::tensorflow::grappler::Cluster* cluster,
const ::tensorflow::grappler::GrapplerItem& item,
::tensorflow::GraphDef* optimized_graph) override;
private:
BatchOpRewriteConfig config_;
// Session config; Optimize() reads experimental().session_metadata().name()
// from it to select per-model batch options.
::tensorflow::ConfigProto config_proto_;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.h"
#include <functional>
#include <string>
#include "google/protobuf/wrappers.pb.h"
#include "google/protobuf/map.h"
#include "google/protobuf/repeated_field.h"
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kBatchFunction[] = "BatchFunction";
constexpr char kBatchOpRewriteConfigParamKey[] = "batch_op_rewrite_config";
constexpr char kNumBatchThreadsAttr[] = "num_batch_threads";
constexpr char kMaxBatchSizeAttr[] = "max_batch_size";
constexpr char kBatchTimeoutMicrosAttr[] = "batch_timeout_micros";
constexpr char kAllowedBatchSizesAttr[] = "allowed_batch_sizes";
constexpr char kMaxEnqueuedBatchesAttr[] = "max_enqueued_batches";
constexpr char kEnableLargeBatchSplitting[] = "enable_large_batch_splitting";
constexpr int64 kBoostMicrosNotSet = -1;
using BatchOpRewriteFunction = std::function<void(NodeDef* batch_op)>;
}
using ::tensorflow::GraphDef;
using ::tensorflow::NodeDef;
using ::tensorflow::Status;
using ::tensorflow::grappler::Cluster;
using ::tensorflow::grappler::GrapplerItem;
namespace {
struct AdaptiveBatchSchedulerParams {
int32 initial_inflight_batches;
int32 min_inflight_batches;
int32 max_inflight_batches;
int32 batches_to_average_over;
int64_t full_batch_scheduling_boost_micros;
};
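// Resolves the adaptive-scheduler option into concrete parameters, falling
// back to kMinInflightBatches / kInitialInflightBatches / kMaxInflightBatches /
// kBatchesToAverageOver (and kBoostMicrosNotSet) for any unset field.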
AdaptiveBatchSchedulerParams GetAdaptiveBatchSchedulerParams(
const BatchOpRewriteConfig::AdaptiveBatchSchedulerOption& option) {
AdaptiveBatchSchedulerParams params;
params.min_inflight_batches =
option.has_min_inflight_batches_limit()
? option.min_inflight_batches_limit().value()
: kMinInflightBatches;
params.initial_inflight_batches =
option.has_initial_inflight_batches_limit()
? option.initial_inflight_batches_limit().value()
: kInitialInflightBatches;
params.max_inflight_batches =
option.has_max_inflight_batches_limit()
? option.max_inflight_batches_limit().value()
: kMaxInflightBatches;
params.batches_to_average_over =
option.has_batches_to_average_over()
? option.batches_to_average_over().value()
: kBatchesToAverageOver;
params.full_batch_scheduling_boost_micros =
option.has_full_batch_scheduling_boost_micros()
? option.full_batch_scheduling_boost_micros().value()
: kBoostMicrosNotSet;
return params;
}
void SetNodeAttrs(const AdaptiveBatchSchedulerParams& params, NodeDef* node) {
::tensorflow::graph_transforms::SetNodeAttr(kEnableAdaptiveSchedulerAttr,
true, node);
::tensorflow::graph_transforms::SetNodeAttr(
kMaxInflightBatchesAttr, params.max_inflight_batches, node);
::tensorflow::graph_transforms::SetNodeAttr(
kMinInflightBatchesAttr, params.min_inflight_batches, node);
::tensorflow::graph_transforms::SetNodeAttr(
kInitialInflightBatchesAttr, params.initial_inflight_batches, node);
::tensorflow::graph_transforms::SetNodeAttr(
kBatchesToAverageOverAttr, params.batches_to_average_over, node);
if (params.full_batch_scheduling_boost_micros != -1) {
::tensorflow::graph_transforms::SetNodeAttr(
kFullBatchSchedulingBoostMicros,
params.full_batch_scheduling_boost_micros, node);
}
}
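// Applies rewrite_fn to every BatchFunction node, both in the top-level graph
// and inside every function of the graph's function library.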
void UpdateBatchOps(GraphDef* graph, BatchOpRewriteFunction rewrite_fn) {
for (int i = 0; i < graph->node_size(); ++i) {
NodeDef* node = graph->mutable_node(i);
if (node->op() == kBatchFunction) {
rewrite_fn(node);
}
}
for (int i = 0; i < graph->library().function_size(); i++) {
FunctionDef* function_def = graph->mutable_library()->mutable_function(i);
for (int j = 0; j < function_def->node_def_size(); j++) {
NodeDef* node = function_def->mutable_node_def(j);
if (node->op() == kBatchFunction) {
rewrite_fn(node);
}
}
}
}
}
Status BatchOpRewriter::Init(
const ::tensorflow::RewriterConfig_CustomGraphOptimizer* config) {
if (config->parameter_map().find(kBatchOpRewriteConfigParamKey) ==
config->parameter_map().end()) {
return absl::InternalError(
"batch_op_rewrite_config param must be set in the rewriter config "
"with a serialized/encoded BatchOpRewriteConfig.");
}
const auto& params =
config->parameter_map().at(kBatchOpRewriteConfigParamKey);
std::string unencoded;
if (params.s().empty()) {
VLOG(2) << "Empty batch-op rewrite config";
return absl::OkStatus();
}
if (!absl::Base64Unescape(params.s(), &unencoded)) {
return absl::InternalError(
"Failed to unencode batch_op_rewrite_config from params.");
}
if (!config_.ParseFromString(unencoded)) {
return absl::InternalError(
"Failed to parse batch_op_rewrite_config from params.");
}
VLOG(2) << "BatchOp Rewrite config is " << config_.DebugString();
return absl::OkStatus();
}
Status BatchOpRewriter::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
VLOG(2) << "Running BatchOp Rewriter";
*optimized_graph = item.graph;
bool asbs_overridden = false;
if (config_proto_.has_experimental() &&
config_proto_.experimental().has_session_metadata()) {
const string model_name =
config_proto_.experimental().session_metadata().name();
if (!config_.model_scheduler_options().empty()) {
return absl::InvalidArgumentError(
"model_scheduler_options is deprecated. Please use the "
"adaptive_batch_scheduler_option field in batch_options instead.");
}
auto model_batch_options = config_.batch_options().find(model_name);
if (model_batch_options != config_.batch_options().end()) {
auto& batch_options = model_batch_options->second;
VLOG(2) << "Rewriting batch_options for " << model_name << " to "
<< batch_options.DebugString();
if (batch_options.has_adaptive_batch_scheduler_option()) {
AdaptiveBatchSchedulerParams params = GetAdaptiveBatchSchedulerParams(
batch_options.adaptive_batch_scheduler_option());
if ((params.min_inflight_batches > params.max_inflight_batches) ||
(params.initial_inflight_batches < params.min_inflight_batches) ||
(params.initial_inflight_batches > params.max_inflight_batches)) {
return absl::InvalidArgumentError(absl::StrCat(
"Requires min_inflight_batches <= initial_inflight_batches "
"and initial_inflight_batches <= max_inflight_batches; Got "
"{min_inflight_batches : ",
params.min_inflight_batches,
", initial_inflight_batches : ", params.initial_inflight_batches,
", max_inflight_batches : ", params.max_inflight_batches, "}."));
}
asbs_overridden = true;
UpdateBatchOps(optimized_graph, [&params](NodeDef* batch_op) {
SetNodeAttrs(params, batch_op);
});
}
if (config_.enable_adaptive_shared_batching_thread_pool() &&
!asbs_overridden && batch_options.has_num_batch_threads() &&
batch_options.num_batch_threads() != 0) {
return absl::InvalidArgumentError(
"Unable to enable adapative shared batching because it requires "
"num_batch_threads=0 but the BatchOpRewriteConfig is also trying "
"to set num_batch_threads. Set either set "
"enable_adaptive_shared_batching_thread_pool or num_batch_threads "
"but not both.");
}
UpdateBatchOps(optimized_graph, [&batch_options](NodeDef* batch_op) {
if (batch_options.has_num_batch_threads()) {
::tensorflow::graph_transforms::SetNodeAttr(
kNumBatchThreadsAttr, batch_options.num_batch_threads(),
batch_op);
}
if (batch_options.has_max_batch_size()) {
::tensorflow::graph_transforms::SetNodeAttr(
kMaxBatchSizeAttr, batch_options.max_batch_size(), batch_op);
}
if (batch_options.has_batch_timeout_micros()) {
::tensorflow::graph_transforms::SetNodeAttr(
kBatchTimeoutMicrosAttr, batch_options.batch_timeout_micros(),
batch_op);
}
if (!batch_options.allowed_batch_sizes().empty()) {
::tensorflow::graph_transforms::SetNodeAttr(
kAllowedBatchSizesAttr, batch_options.allowed_batch_sizes(),
batch_op);
}
if (batch_options.has_max_enqueued_batches()) {
::tensorflow::graph_transforms::SetNodeAttr(
kMaxEnqueuedBatchesAttr, batch_options.max_enqueued_batches(),
batch_op);
}
if (batch_options.has_disable_large_batch_splitting()) {
::tensorflow::graph_transforms::SetNodeAttr(
kEnableLargeBatchSplitting,
!batch_options.disable_large_batch_splitting(), batch_op);
}
});
}
}
if (asbs_overridden) {
return absl::OkStatus();
}
if (config_.enable_adaptive_shared_batching_thread_pool()) {
UpdateBatchOps(optimized_graph, [](NodeDef* batch_op) {
::tensorflow::graph_transforms::SetNodeAttr(kNumBatchThreadsAttr, 0,
batch_op);
});
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(BatchOpRewriter, "batch_op_rewrite");
}
} | #include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.h"
#include <vector>
#include "google/protobuf/wrappers.pb.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/escaping.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::GraphDef;
using ::tensorflow::NodeDef;
using ::tensorflow::RewriterConfig_CustomGraphOptimizer;
using ::tensorflow::Status;
using ::tensorflow::grappler::GrapplerItem;
using ::tensorflow::serving::BatchOpRewriteConfig;
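// Adds a "BatchFunction" node with the given attributes to both the top-level graph and to
// a function in the graph's library, since the rewriter visits both places.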
void AddBatchOp(GraphDef* graph, int num_batch_threads = 16,
const absl::flat_hash_map<string, int>& reserved_int_attrs = {},
int max_batch_size = 16, int batch_timeout_micros = 10000,
const std::vector<int32>& allowed_batch_sizes = {8, 16},
int max_enqueued_batches = 1000,
bool disable_large_batch_splitting = false) {
auto set_batch_node_attribute = [&](const int32_t num_batch_threads,
NodeDef* batch_op) {
batch_op->set_name("cond/batch/BatchFunction");
batch_op->set_op("BatchFunction");
::tensorflow::graph_transforms::SetNodeAttr("num_batch_threads",
num_batch_threads, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("max_batch_size",
max_batch_size, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("batch_timeout_micros",
batch_timeout_micros, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("allowed_batch_sizes",
allowed_batch_sizes, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("max_enqueued_batches",
max_enqueued_batches, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("enable_large_batch_splitting",
!disable_large_batch_splitting,
batch_op);
if (!reserved_int_attrs.empty()) {
::tensorflow::graph_transforms::SetNodeAttr(kEnableAdaptiveSchedulerAttr,
true, batch_op);
for (const auto& reserved_int_attr : reserved_int_attrs) {
::tensorflow::graph_transforms::SetNodeAttr(
reserved_int_attr.first, reserved_int_attr.second, batch_op);
}
}
};
set_batch_node_attribute(num_batch_threads, graph->add_node());
FunctionDefLibrary* function_def_lib = graph->mutable_library();
FunctionDef* function_def = function_def_lib->add_function();
set_batch_node_attribute(num_batch_threads, function_def->add_node_def());
}
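// Packs the BatchOpRewriteConfig into a custom-optimizer config: serialized, base64-encoded,
// and stored under the "batch_op_rewrite_config" parameter.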
RewriterConfig_CustomGraphOptimizer MakeConfig(
const BatchOpRewriteConfig& config) {
RewriterConfig_CustomGraphOptimizer rewriter_config;
(*rewriter_config.mutable_parameter_map())["batch_op_rewrite_config"].set_s(
absl::Base64Escape(config.SerializeAsString()));
return rewriter_config;
}
class BatchOpRewriterTest : public ::testing::TestWithParam<bool> {};
INSTANTIATE_TEST_SUITE_P(RewriteNumBatchThreads, BatchOpRewriterTest,
::testing::Bool());
TEST_P(BatchOpRewriterTest, Basic) {
GrapplerItem item;
AddBatchOp(&item.graph, 16);
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(GetParam());
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.Init(&rewriter_config));
GraphDef optimized_graph;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected_graph;
AddBatchOp(&expected_graph, GetParam() ? 0 : 16);
EXPECT_EQ(optimized_graph.DebugString(), expected_graph.DebugString());
}
TEST_P(BatchOpRewriterTest, InvalidArgumentForAdaptiveBatchScheduler) {
GrapplerItem item;
AddBatchOp(&item.graph, 16);
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(GetParam());
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_batches_to_average_over()
->set_value(1000);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_initial_inflight_batches_limit()
->set_value(8);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_min_inflight_batches_limit()
->set_value(16);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_max_inflight_batches_limit()
->set_value(32);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.Init(&rewriter_config));
optimizer.config_proto_.mutable_experimental()
->mutable_session_metadata()
->set_version(123);
optimizer.config_proto_.mutable_experimental()
->mutable_session_metadata()
->set_name("model_with_override");
GraphDef optimized_graph;
Status status = optimizer.Optimize(nullptr, item, &optimized_graph);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(errors::IsInvalidArgument(status));
}
TEST_P(BatchOpRewriterTest, AdaptiveBatchScheduler) {
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(GetParam());
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_batches_to_average_over()
->set_value(1000);
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_initial_inflight_batches_limit()
->set_value(16);
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_min_inflight_batches_limit()
->set_value(8);
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_max_inflight_batches_limit()
->set_value(32);
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_full_batch_scheduling_boost_micros()
->set_value(12345);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
ConfigProto config_proto;
config_proto.mutable_experimental()->mutable_session_metadata()->set_version(
123);
config_proto.mutable_experimental()->mutable_session_metadata()->set_name(
"model_with_override");
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config));
GraphDef optimized_graph;
GrapplerItem item;
AddBatchOp(&item.graph, 16);
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected_graph;
AddBatchOp(&expected_graph, 16 /* num_batch_threads */,
{{kBatchesToAverageOverAttr, 1000},
{kInitialInflightBatchesAttr, 16},
{kMinInflightBatchesAttr, 8},
{kMaxInflightBatchesAttr, 32},
{kFullBatchSchedulingBoostMicros, 12345}});
EXPECT_EQ(optimized_graph.DebugString(), expected_graph.DebugString());
}
TEST_F(BatchOpRewriterTest, UpdateModelSchedulerOptions) {
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(true);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_batches_to_average_over()
->set_value(1000);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_initial_inflight_batches_limit()
->set_value(16);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_min_inflight_batches_limit()
->set_value(8);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_max_inflight_batches_limit()
->set_value(32);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
ConfigProto config_proto;
config_proto.mutable_experimental()->mutable_session_metadata()->set_version(
123);
config_proto.mutable_experimental()->mutable_session_metadata()->set_name(
"model_with_override");
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config));
GraphDef optimized_graph;
GrapplerItem item;
AddBatchOp(&item.graph, 16);
ASSERT_FALSE(optimizer.Optimize(nullptr, item, &optimized_graph).ok());
}
TEST_F(BatchOpRewriterTest, UpdateBatchOptions) {
BatchOpRewriteConfig config;
(*config.mutable_batch_options())["model_with_override"]
.set_num_batch_threads(2);
(*config.mutable_batch_options())["model_with_override"].set_max_batch_size(
128);
(*config.mutable_batch_options())["model_with_override"]
.set_batch_timeout_micros(5000);
const std::vector<int32> allowed_batch_sizes{4, 32};
(*config.mutable_batch_options())["model_with_override"]
.mutable_allowed_batch_sizes()
->Add(allowed_batch_sizes.begin(), allowed_batch_sizes.end());
(*config.mutable_batch_options())["model_with_override"]
.set_max_enqueued_batches(500);
(*config.mutable_batch_options())["model_with_override"]
.set_disable_large_batch_splitting(true);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
ConfigProto config_proto;
config_proto.mutable_experimental()->mutable_session_metadata()->set_version(
123);
config_proto.mutable_experimental()->mutable_session_metadata()->set_name(
"model_with_override");
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config));
GraphDef optimized_graph;
GrapplerItem item;
AddBatchOp(&item.graph);
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected_graph;
AddBatchOp(&expected_graph, 2 /* num_batch_threads */,
{} /* reserved_int_attrs */, 128 /* max_batch_size */,
5000 /* batch_timeout_micros */, allowed_batch_sizes,
500 /* max_enqueued_batches */,
true /* disable_large_batch_splitting */);
EXPECT_EQ(optimized_graph.DebugString(), expected_graph.DebugString());
}
TEST_F(BatchOpRewriterTest,
UpdateAdaptiveSharedBatchSchedulerAndNumBatchThreads) {
GrapplerItem item;
AddBatchOp(&item.graph, 16);
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(true);
(*config.mutable_batch_options())["model_with_override"]
.set_num_batch_threads(2);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
ConfigProto config_proto;
config_proto.mutable_experimental()->mutable_session_metadata()->set_version(
123);
config_proto.mutable_experimental()->mutable_session_metadata()->set_name(
"model_with_override");
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config));
GraphDef optimized_graph;
ASSERT_FALSE(optimizer.Optimize(nullptr, item, &optimized_graph).ok());
}
}
}
} |
1,402 | cpp | tensorflow/tensorflow | auto_shard | tensorflow/core/grappler/optimizers/data/auto_shard.cc | tensorflow/core/grappler/optimizers/data/auto_shard_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_AUTO_SHARD_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_AUTO_SHARD_H_
#include <string>
#include <vector>
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
class AutoShard : public TFDataOptimizerBase {
public:
AutoShard() = default;
~AutoShard() override = default;
string name() const override { return "tf_auto_shard"; }
bool UsesFunctionLibrary() const override { return true; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override;
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
private:
int64_t num_workers_;
int64_t num_replicas_;
int64_t index_;
tensorflow::data::AutoShardPolicy auto_shard_policy_;
};
namespace internal {
bool IsEligibleRewriteBatchSize(const NodeDef& sink_node,
const MutableGraphView& graph,
std::vector<std::string>* ineligible_reason);
}
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/auto_shard.h"
#include <array>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace grappler {
namespace {
using tensorflow::data::AutoShardPolicy;
constexpr char kAssertCardinalityDatasetOpName[] = "AssertCardinalityDataset";
constexpr char kBatchDatasetOpName[] = "BatchDataset";
constexpr char kBatchDatasetV2OpName[] = "BatchDatasetV2";
constexpr char kMapAndBatchDatasetOpName[] = "MapAndBatchDataset";
constexpr char kMapDatasetOpName[] = "MapDataset";
constexpr char kShardDatasetOpName[] = "ShardDataset";
constexpr char kShuffleDatasetOpName[] = "ShuffleDataset";
constexpr char kShuffleDatasetV2OpName[] = "ShuffleDatasetV2";
constexpr char kShuffleDatasetV3OpName[] = "ShuffleDatasetV3";
constexpr char kParallelBatchDatasetOpName[] = "ParallelBatchDataset";
constexpr char kPrefetchDatasetOpName[] = "PrefetchDataset";
constexpr char kFinalizeDatasetOpName[] = "FinalizeDataset";
constexpr char kOptionsDatasetOpName[] = "OptionsDataset";
constexpr char kRebatchDatasetOpName[] = "RebatchDataset";
constexpr char kRebatchDatasetV2OpName[] = "RebatchDatasetV2";
constexpr char kTensorDatasetOpName[] = "TensorDataset";
constexpr char kTensorSliceDatasetOpName[] = "TensorSliceDataset";
constexpr char kPlaceholderOpName[] = "Placeholder";
constexpr char kConstOpName[] = "Const";
constexpr char kNumWorkersAttrName[] = "num_workers";
constexpr char kNumReplicasAttrName[] = "num_replicas";
constexpr char kIndexAttrName[] = "index";
constexpr char kAutoShardPolicyAttrName[] = "auto_shard_policy";
constexpr char kReshuffleEachIteration[] = "reshuffle_each_iteration";
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
constexpr std::array<const char*, 6> kReaderDatasetOps = {
"ArrayRecordDataset",
"FixedLengthRecordDataset",
"RecordIODataset",
"SSTableDataset",
"TextLineDataset",
"TFRecordDataset"
};
constexpr std::array<const char*, 2> kMultipleInputsDatasetOps = {
"ConcatenateDataset",
"ZipDataset"
};
constexpr std::array<const char*, 32> kPassThroughOps = {
"_Retval",
"AssertNextDataset",
"BatchDataset",
"CacheDataset",
"ExperimentalMapAndBatchDataset",
"ExperimentalParseExampleDataset",
"ExperimentalRebatchDataset",
"FilterDataset",
"FinalizeDataset",
"Identity",
"MapAndBatchDataset",
"MapDataset",
"MaxIntraOpParallelismDataset",
"ModelDataset",
"OptimizeDataset",
"OptionsDataset",
"PaddedBatchDataset",
"ParallelBatchDataset",
"ParallelMapDataset",
"ParseExampleDataset",
"PrefetchDataset",
"PrivateThreadPoolDataset",
"ReduceDataset",
"RebatchDataset",
"RepeatDataset",
"ShardDataset",
"ShuffleAndRepeatDataset",
"ShuffleDataset",
"SkipDataset",
"TakeDataset",
"UnbatchDataset",
"WindowDataset",
};
constexpr std::array<const char*, 5> kFuncDatasetOps = {
"ExperimentalParallelInterleaveDataset",
"FlatMapDataset",
"InterleaveDataset",
"LegacyParallelInterleaveDataset",
"ParallelInterleaveDataset",
};
constexpr std::array<const char*, 5> kUnshardableSourceDatasetOps = {
"GeneratorDataset",
"RangeDataset",
"SparseTensorsSliceDataset",
"TensorDataset",
"TensorSliceDataset",
};
constexpr std::array<const char*, 20> kBatchSizeOrthogonalDatasetOps = {
"AssertCardinalityDataset",
"AssertNextDataset",
"BytesProducedStatsDataset",
"CacheDataset",
"FinalizeDataset",
"Identity",
"LatencyStatsDataset",
"MaxIntraOpParallelismDataset",
"ModelDataset",
"NonSerializableDataset",
"OptimizeDataset",
"OptionsDataset",
"ParseExampleDataset",
"PrefetchDataset",
"PrivateThreadPoolDataset",
"RebatchDataset",
"RepeatDataset",
"SetStatsAggregatorDataset",
"SleepDataset",
"ThreadPoolDataset",
};
constexpr std::array<const char*, 3> kBatchDatasetOps = {
kBatchDatasetOpName,
kMapAndBatchDatasetOpName,
kParallelBatchDatasetOpName,
};
Status OptimizeGraph(const GrapplerItem& item, int64_t num_workers,
int64_t index, AutoShardPolicy policy,
int64_t num_replicas, GraphDef* output,
AutoShardPolicy* policy_applied);
template <std::size_t SIZE>
bool IsDatasetNodeOfType(const NodeDef& node,
const std::array<const char*, SIZE>& arr) {
for (const auto& dataset_op_name : arr) {
if (tensorflow::data::MatchesAnyVersion(dataset_op_name,
node.op())) {
return true;
}
}
return false;
}
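// Inserts a new ShardDataset(num_workers, index) node between `add_before` and its input,
// copying output_shapes/output_types from the upstream dataset node (or synthesizing
// defaults), and rewires the upstream node's fanouts to the new shard node.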
Status AddShardNode(MutableGraphView* graph, const NodeDef& add_before,
int64_t num_workers, int64_t index) {
NodeDef new_node;
new_node.set_op(kShardDatasetOpName);
graph_utils::SetUniqueGraphNodeName(kShardDatasetOpName, graph->graph(),
&new_node);
NodeDef* num_shards_node =
graph_utils::AddScalarConstNode<int64_t>(num_workers, graph);
NodeDef* index_node = graph_utils::AddScalarConstNode<int64_t>(index, graph);
new_node.add_input(add_before.input(0));
new_node.add_input(num_shards_node->name());
new_node.add_input(index_node->name());
(*(new_node.mutable_attr()))[data::ShardDatasetOp::kRequireNonEmpty].set_b(
true);
NodeDef* add_after = graph->GetNode(add_before.input(0));
if (absl::StrContains(add_after->op(), "Dataset")) {
if (add_after->attr().count(kOutputShapes) > 0) {
graph_utils::CopyAttribute(kOutputShapes, *add_after, &new_node);
} else {
tensorflow::TensorShapeProto* shape =
(*(new_node.mutable_attr()))[kOutputShapes]
.mutable_list()
->add_shape();
shape->set_unknown_rank(true);
}
if (add_after->attr().count(kOutputTypes) > 0) {
graph_utils::CopyAttribute(kOutputTypes, *add_after, &new_node);
} else if (add_after->attr().count("Toutput_types") > 0) {
(*(new_node.mutable_attr()))[kOutputTypes] =
add_after->attr().at("Toutput_types");
} else {
(*(new_node.mutable_attr()))[kOutputTypes].mutable_list()->add_type(
tensorflow::DataType::DT_STRING);
}
} else {
return errors::NotFound(
"Unable to shard this input. You may need to wrap the inputs to your "
"reader dataset in a TensorSliceDataset. Input node is ",
add_after->DebugString());
}
NodeDef* new_node_graph = graph->AddNode(std::move(new_node));
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(add_after->name(), new_node_graph->name()));
return absl::OkStatus();
}
Status AddShuffleDataset(MutableGraphView* graph, const NodeDef& add_before,
const string& buffer_size_node,
const string& seed_node, const string& seed2_node,
bool reshuffle_each_iteration) {
NodeDef* add_after = graph->GetNode(add_before.input(0));
NodeDef new_node;
new_node.set_op(kShuffleDatasetOpName);
graph_utils::SetUniqueGraphNodeName(kShuffleDatasetOpName, graph->graph(),
&new_node);
new_node.add_input(add_before.input(0));
new_node.add_input(buffer_size_node);
new_node.add_input(seed_node);
new_node.add_input(seed2_node);
graph_utils::CopyAttribute(kOutputShapes, *add_after, &new_node);
graph_utils::CopyAttribute(kOutputTypes, *add_after, &new_node);
AttrValue reshuffle_attr;
reshuffle_attr.set_b(reshuffle_each_iteration);
(*new_node.mutable_attr())[kReshuffleEachIteration] = reshuffle_attr;
NodeDef* new_node_graph = graph->AddNode(std::move(new_node));
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(add_after->name(), new_node_graph->name()));
return absl::OkStatus();
}
Status AddShuffleDatasetV2(MutableGraphView* graph, const NodeDef& add_before,
const string& buffer_size_node,
const string& seed_generator_node) {
NodeDef* add_after = graph->GetNode(add_before.input(0));
NodeDef new_node;
new_node.set_op(kShuffleDatasetV2OpName);
graph_utils::SetUniqueGraphNodeName(kShuffleDatasetV2OpName, graph->graph(),
&new_node);
new_node.add_input(add_before.input(0));
new_node.add_input(buffer_size_node);
new_node.add_input(seed_generator_node);
graph_utils::CopyAttribute(kOutputShapes, *add_after, &new_node);
graph_utils::CopyAttribute(kOutputTypes, *add_after, &new_node);
NodeDef* new_node_graph = graph->AddNode(std::move(new_node));
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(add_after->name(), new_node_graph->name()));
return absl::OkStatus();
}
Status AddShuffleDatasetV3(MutableGraphView* graph, const NodeDef& add_before,
const string& buffer_size_node,
const string& seed_node, const string& seed2_node,
const string& seed_generator_node,
bool reshuffle_each_iteration) {
NodeDef* add_after = graph->GetNode(add_before.input(0));
NodeDef new_node;
new_node.set_op(kShuffleDatasetV3OpName);
graph_utils::SetUniqueGraphNodeName(kShuffleDatasetV3OpName, graph->graph(),
&new_node);
new_node.add_input(add_before.input(0));
new_node.add_input(buffer_size_node);
new_node.add_input(seed_node);
new_node.add_input(seed2_node);
new_node.add_input(seed_generator_node);
graph_utils::CopyAttribute(kOutputShapes, *add_after, &new_node);
graph_utils::CopyAttribute(kOutputTypes, *add_after, &new_node);
AttrValue reshuffle_attr;
reshuffle_attr.set_b(reshuffle_each_iteration);
(*new_node.mutable_attr())[kReshuffleEachIteration] = reshuffle_attr;
NodeDef* new_node_graph = graph->AddNode(std::move(new_node));
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(add_after->name(), new_node_graph->name()));
return absl::OkStatus();
}
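// Returns true if the function referenced by `node`'s "f" attr contains a reader dataset op,
// checking nested function-based datasets recursively.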
bool ReaderOpInFunction(const NodeDef& node,
const FunctionLibraryDefinition& flib) {
auto f_attr_it = node.attr().find("f");
if (f_attr_it == node.attr().end()) return false;
const FunctionDef* func = flib.Find(f_attr_it->second.func().name());
for (int i = 0; i < func->node_def_size(); i++) {
NodeDef node_in_func = func->node_def(i);
if (IsDatasetNodeOfType(node_in_func, kReaderDatasetOps) &&
node_in_func.input_size() > 0) {
return true;
}
if (IsDatasetNodeOfType(func->node_def(i), kFuncDatasetOps) &&
ReaderOpInFunction(func->node_def(i), flib)) {
return true;
}
}
return false;
}
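// Searches `node`'s fanin for a ShuffleDataset; if found, removes it from the pipeline and
// records its op name, inputs, and reshuffle setting so the caller can re-create it later.
// The V2/V3 variants below do the same for the corresponding shuffle op versions.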
Status RemoveShuffleDataset(MutableGraphView* graph, const NodeDef& node,
absl::flat_hash_set<string>* nodes_to_delete,
string* op_name, string* buffer_size_node,
string* seed_node, string* seed2_node,
bool* reshuffle_each_iteration) {
if (node.op() == kShuffleDatasetOpName) {
*op_name = node.op();
*buffer_size_node = node.input(1);
*seed_node = node.input(2);
*seed2_node = node.input(3);
*reshuffle_each_iteration = node.attr().at(kReshuffleEachIteration).b();
TF_RETURN_IF_ERROR(graph->UpdateFanouts(node.name(), node.input(0)));
nodes_to_delete->insert(node.name());
}
for (const auto& fanin : graph->GetFanins(node, true)) {
TF_RETURN_IF_ERROR(RemoveShuffleDataset(
graph, *fanin.node, nodes_to_delete, op_name, buffer_size_node,
seed_node, seed2_node, reshuffle_each_iteration));
}
return absl::OkStatus();
}
Status RemoveShuffleDatasetV2(MutableGraphView* graph, const NodeDef& node,
absl::flat_hash_set<string>* nodes_to_delete,
string* op_name, string* buffer_size_node,
string* seed_generator_node) {
if (node.op() == kShuffleDatasetV2OpName) {
*op_name = node.op();
*buffer_size_node = node.input(1);
*seed_generator_node = node.input(2);
TF_RETURN_IF_ERROR(graph->UpdateFanouts(node.name(), node.input(0)));
nodes_to_delete->insert(node.name());
}
for (const auto& fanin : graph->GetFanins(node, true)) {
TF_RETURN_IF_ERROR(
RemoveShuffleDatasetV2(graph, *fanin.node, nodes_to_delete, op_name,
buffer_size_node, seed_generator_node));
}
return absl::OkStatus();
}
Status RemoveShuffleDatasetV3(MutableGraphView* graph, const NodeDef& node,
absl::flat_hash_set<string>* nodes_to_delete,
string* op_name, string* buffer_size_node,
string* seed_node, string* seed2_node,
string* seed_generator_node,
bool* reshuffle_each_iteration) {
if (node.op() == kShuffleDatasetV3OpName) {
*op_name = node.op();
*buffer_size_node = node.input(1);
*seed_node = node.input(2);
*seed2_node = node.input(3);
*seed_generator_node = node.input(4);
*reshuffle_each_iteration = node.attr().at(kReshuffleEachIteration).b();
TF_RETURN_IF_ERROR(graph->UpdateFanouts(node.name(), node.input(0)));
nodes_to_delete->insert(node.name());
}
for (const auto& fanin : graph->GetFanins(node, true)) {
TF_RETURN_IF_ERROR(RemoveShuffleDatasetV3(
graph, *fanin.node, nodes_to_delete, op_name, buffer_size_node,
seed_node, seed2_node, seed_generator_node, reshuffle_each_iteration));
}
return absl::OkStatus();
}
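// Shards the pipeline directly above `node`: adds the ShardDataset, and if a ShuffleDataset
// (any version) was found upstream, removes it and re-inserts an equivalent shuffle after
// the shard so shuffling happens on the already-sharded data.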
Status ProcessDatasetSourceNode(MutableGraphView* graph, const NodeDef& node,
absl::flat_hash_set<string>* nodes_to_delete,
int64_t num_workers, int64_t index) {
string shuffle_op_name = "";
string buffer_size_node = "";
string seed_node = "";
string seed2_node = "";
string seed_generator_node = "";
bool reshuffle_each_iteration;
TF_RETURN_IF_ERROR(AddShardNode(graph, node, num_workers, index));
TF_RETURN_IF_ERROR(RemoveShuffleDataset(
graph, node, nodes_to_delete, &shuffle_op_name, &buffer_size_node,
&seed_node, &seed2_node, &reshuffle_each_iteration));
if (shuffle_op_name.empty()) {
TF_RETURN_IF_ERROR(
RemoveShuffleDatasetV2(graph, node, nodes_to_delete, &shuffle_op_name,
&buffer_size_node, &seed_generator_node));
}
if (shuffle_op_name.empty()) {
TF_RETURN_IF_ERROR(RemoveShuffleDatasetV3(
graph, node, nodes_to_delete, &shuffle_op_name, &buffer_size_node,
&seed_node, &seed2_node, &seed_generator_node,
&reshuffle_each_iteration));
}
if (shuffle_op_name == kShuffleDatasetOpName) {
TF_RETURN_IF_ERROR(AddShuffleDataset(graph, node, buffer_size_node,
seed_node, seed2_node,
reshuffle_each_iteration));
} else if (shuffle_op_name == kShuffleDatasetV2OpName) {
TF_RETURN_IF_ERROR(AddShuffleDatasetV2(graph, node, buffer_size_node,
seed_generator_node));
} else if (shuffle_op_name == kShuffleDatasetV3OpName) {
TF_RETURN_IF_ERROR(AddShuffleDatasetV3(
graph, node, buffer_size_node, seed_node, seed2_node,
seed_generator_node, reshuffle_each_iteration));
}
return absl::OkStatus();
}
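// Walks through pass-through ops looking for a function-based dataset whose input is a
// TensorDataset/TensorSliceDataset fed by a Placeholder; returns that dataset node, or
// nullptr if the pattern is not found.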
const NodeDef* FindFuncAndTensorSliceDataset(
const NodeDef* node, int64_t num_workers, int64_t index,
FunctionLibraryDefinition* flib, MutableGraphView* graph,
absl::flat_hash_set<string>* nodes_to_delete) {
if (IsDatasetNodeOfType(*node, kFuncDatasetOps)) {
const NodeDef* input_node = graph_utils::GetInputNode(*node, *graph, 0);
if (input_node->op() == kTensorSliceDatasetOpName ||
input_node->op() == kTensorDatasetOpName) {
const NodeDef* next_input_node =
graph_utils::GetInputNode(*input_node, *graph, 0);
if (next_input_node->op() == kPlaceholderOpName) {
return node;
}
}
}
if (!IsDatasetNodeOfType(*node, kPassThroughOps)) {
return nullptr;
}
const NodeDef* input_node = graph_utils::GetInputNode(*node, *graph, 0);
return FindFuncAndTensorSliceDataset(input_node, num_workers, index, flib,
graph, nodes_to_delete);
}
enum class DropRemainderValue { kUnknown, kTrue, kFalse };
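// Resolves the drop_remainder input of a batch node to a constant value, if possible.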
DropRemainderValue GetDropRemainder(const MutableGraphView& graph,
const NodeDef& batch_node) {
const NodeDef* drop_remainder = nullptr;
if (batch_node.op() == kBatchDatasetOpName ||
batch_node.op() == kBatchDatasetV2OpName) {
drop_remainder = graph.GetNode(batch_node.input(2));
} else if (batch_node.op() == kParallelBatchDatasetOpName) {
drop_remainder = graph.GetNode(batch_node.input(3));
} else if (batch_node.op() == kMapAndBatchDatasetOpName) {
int drop_remainder_index =
3 + batch_node.attr().at("Targuments").list().shape_size();
if (drop_remainder_index >= batch_node.input_size()) {
LOG(ERROR) << "Fail to find the drop_remainder of op: "
<< batch_node.DebugString();
return DropRemainderValue::kUnknown;
}
drop_remainder = graph.GetNode(batch_node.input(drop_remainder_index));
} else {
LOG(ERROR) << "Expect a batch node but get " << batch_node.DebugString();
return DropRemainderValue::kUnknown;
}
if (!IsConstant(*drop_remainder)) {
return DropRemainderValue::kUnknown;
}
bool drop_remainder_value;
if (!GetNodeAttr(*drop_remainder, "value", &drop_remainder_value).ok()) {
return DropRemainderValue::kUnknown;
}
return drop_remainder_value ? DropRemainderValue::kTrue
: DropRemainderValue::kFalse;
}
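// Walks upstream from `node` toward the dataset source: pass-through ops are skipped,
// multi-input datasets are recursed into, and sharding is applied at reader datasets (or at
// function-based datasets whose functions contain readers). Unshardable sources and
// non-dataset nodes produce a NotFound error.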
Status RecursivelyHandleOp(const NodeDef& node, int64_t num_workers,
int64_t index, FunctionLibraryDefinition* flib,
MutableGraphView* graph,
absl::flat_hash_set<string>* nodes_to_delete) {
if (node.op() == kAssertCardinalityDatasetOpName) {
LOG(WARNING) << "The `assert_cardinality` transformation is currently not "
"handled by the auto-shard rewrite and will be removed.";
nodes_to_delete->insert(node.name());
TF_RETURN_IF_ERROR(graph->UpdateFanouts(node.name(), node.input(0)));
const NodeDef* input_node = graph_utils::GetInputNode(node, *graph, 0);
return RecursivelyHandleOp(*input_node, num_workers, index, flib, graph,
nodes_to_delete);
}
if (IsDatasetNodeOfType(node, kUnshardableSourceDatasetOps)) {
return errors::NotFound("Found an unshardable source dataset: ",
node.DebugString());
}
if (IsDatasetNodeOfType(node, kMultipleInputsDatasetOps)) {
for (int i = 0; i < node.input_size(); ++i) {
const NodeDef* input_node = graph_utils::GetInputNode(node, *graph, i);
TF_RETURN_IF_ERROR(RecursivelyHandleOp(*input_node, num_workers, index,
flib, graph, nodes_to_delete));
}
return absl::OkStatus();
}
if (IsDatasetNodeOfType(node, kFuncDatasetOps)) {
const NodeDef* input_node = graph_utils::GetInputNode(node, *graph, 0);
const NodeDef* flat_map_node = FindFuncAndTensorSliceDataset(
input_node, num_workers, index, flib, graph, nodes_to_delete);
if (flat_map_node != nullptr) {
auto fanouts = graph->GetFanouts(*flat_map_node, false);
if (fanouts.size() == 1) {
return ProcessDatasetSourceNode(graph, *fanouts.begin()->node,
nodes_to_delete, num_workers, index);
}
}
}
if ((IsDatasetNodeOfType(node, kFuncDatasetOps) ||
IsDatasetNodeOfType(node, kPassThroughOps)) &&
ReaderOpInFunction(node, *flib)) {
return ProcessDatasetSourceNode(graph, node, nodes_to_delete, num_workers,
index);
}
if (IsDatasetNodeOfType(node, kReaderDatasetOps)) {
return ProcessDatasetSourceNode(graph, node, nodes_to_delete, num_workers,
index);
}
if (!IsDatasetNodeOfType(node, kFuncDatasetOps) &&
!IsDatasetNodeOfType(node, kPassThroughOps)) {
return errors::NotFound(
"Did not find a shardable source, walked to ",
"a node which is not a dataset: ", node.DebugString(),
". Consider either turning off auto-sharding or switching the "
"auto_shard_policy to DATA to shard this dataset. You can do this by "
"creating a new `tf.data.Options()` object then setting "
"`options.experimental_distribute.auto_shard_policy = "
"AutoShardPolicy.DATA` before applying the options object to the "
"dataset via `dataset.with_options(options)`.");
}
const NodeDef* input_node = graph_utils::GetInputNode(node, *graph, 0);
return RecursivelyHandleOp(*input_node, num_workers, index, flib, graph,
nodes_to_delete);
}
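// FILE-policy sharding: shard at the reader(s) reachable from the sink node, then delete
// any nodes removed along the way (e.g. hoisted shuffles).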
Status ShardByFile(const NodeDef& sink_node, int64_t num_workers, int64_t index,
FunctionLibraryDefinition* flib, MutableGraphView* graph) {
absl::flat_hash_set<string> nodes_to_delete;
TF_RETURN_IF_ERROR(RecursivelyHandleOp(sink_node, num_workers, index, flib,
graph, &nodes_to_delete));
return graph->DeleteNodes(nodes_to_delete);
}
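// If the node feeding the sink is a RebatchDatasetV2, rewrites it to the legacy
// RebatchDataset(num_replicas) form and marks the batch dimension of its output shapes as
// unknown.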
Status RewriteRebatchV2ToV1(const NodeDef& sink_node, int64_t num_replicas,
MutableGraphView* graph) {
NodeDef* input_node = graph_utils::GetInputNode(sink_node, *graph);
if (input_node->op() != kRebatchDatasetV2OpName) {
return absl::OkStatus();
}
NodeDef* rebatch_node = input_node;
rebatch_node->set_op(kRebatchDatasetOpName);
rebatch_node->mutable_input()->DeleteSubrange(1, 2);
if (num_replicas < 1) {
return errors::InvalidArgument(
"Cannot rewrite RebatchDatasetV2 to legacy RebatchDataset with invalid "
"num_replicas argument. `num_replicas` is ",
num_replicas, ", but expected to be >= 1.");
}
auto num_replicas_node = graph_utils::AddScalarConstNode(num_replicas, graph);
rebatch_node->add_input(num_replicas_node->name());
(*rebatch_node->mutable_attr())["use_fallback"].set_b(true);
auto* shapes_attr =
gtl::FindOrNull(*rebatch_node->mutable_attr(), "output_shapes");
if (shapes_attr == nullptr) {
return errors::InvalidArgument(
"Cannot rewrite RebatchDatasetV2 with missing `output_shapes` attr.");
}
for (int i = 0; i < shapes_attr->list().shape_size(); ++i) {
auto* shape = shapes_attr->mutable_list()->mutable_shape(i);
if (shape->unknown_rank()) continue;
shape->mutable_dim(0)->set_size(-1);
}
return absl::OkStatus();
}
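// DATA-policy sharding: inserts a ShardDataset at the end of the pipeline so that only
// trailing Prefetch/Options/Finalize ops come after it, rewriting RebatchDatasetV2 to the
// legacy form first.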
Status ShardByData(const NodeDef& sink_node, int64_t num_workers, int64_t index,
int64_t num_replicas, MutableGraphView* graph) {
const NodeDef* shard_before = &sink_node;
NodeDef* input_node = graph_utils::GetInputNode(sink_node, *graph);
while (input_node->op() == kPrefetchDatasetOpName ||
input_node->op() == kOptionsDatasetOpName ||
input_node->op() == kFinalizeDatasetOpName) {
shard_before = input_node;
input_node = graph_utils::GetInputNode(*input_node, *graph);
}
TF_RETURN_IF_ERROR(RewriteRebatchV2ToV1(*shard_before, num_replicas, graph));
return AddShardNode(graph, *shard_before, num_workers, index);
}
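// HINT-policy sharding: finds ShardDataset nodes whose num_shards input is the SHARD_HINT
// placeholder constant and fills in the actual shard count and index.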
Status ShardByHint(const NodeDef& sink_node, int64_t num_workers, int64_t index,
int64_t num_replicas, MutableGraphView* graph) {
auto get_shard_node = [graph](const NodeDef& node) -> const NodeDef* {
if (node.op() != kShardDatasetOpName) return nullptr;
auto num_workers_node = graph->GetNode(node.input(1));
if (num_workers_node->op() != kConstOpName) return nullptr;
if (num_workers_node->attr().at("value").tensor().int64_va | #include "tensorflow/core/grappler/optimizers/data/auto_shard.h"
#include <string>
#include <vector>
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::grappler::graph_tests_utils::MakeBatchV2Node;
using ::tensorflow::grappler::graph_tests_utils::MakeMapAndBatchNode;
using ::tensorflow::grappler::graph_tests_utils::MakeParallelBatchNode;
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
using ::testing::UnorderedElementsAre;
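// Appends the common pipeline tail (map -> rebatch -> prefetch -> Sink) after
// `input_node_name` and registers "Sink" as the fetch node.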
void FinishItem(GrapplerItem* item, const string& input_node_name) {
*item->graph.add_node() =
NDef("map_before_rebatch", "MapDataset", {input_node_name},
{{"f", "__inference_Dataset_map_normalize_8232"},
{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}});
*item->graph.add_node() =
NDef("num_replicas", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}});
*item->graph.add_node() =
NDef("rebatch", "RebatchDataset", {"map_before_rebatch", "num_replicas"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}});
*item->graph.add_node() =
NDef("prefetch_count", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}});
*item->graph.add_node() =
NDef("prefetch", "PrefetchDataset", {"rebatch", "prefetch_count"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}});
*item->graph.add_node() = NDef("Sink", "Identity", {"prefetch"}, {});
item->fetch.push_back("Sink");
}
NodeDef AddCardinalityAttr(NodeDef node, int64_t cardinality) {
(*node.mutable_attr())[data::kCardinalityAttrForRewrite].set_i(cardinality);
return node;
}
TEST(RewriteBatchTest, InfiniteSource) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("repeat_count", "Const", {}, {{"value", -1}, {"dtype", DT_INT32}}),
NDef("repeat", "RepeatDataset", {"tf_record", "repeat_count"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "repeat", "batch_size", "drop_remainder",
false),
data::kInfiniteCardinality),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_TRUE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason))
<< absl::StrJoin(ineligible_reason, ",");
}
TEST(RewriteBatchTest, InfiniteSourceMapAndBatch) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("repeat_count", "Const", {}, {{"value", -1}, {"dtype", DT_INT32}}),
NDef("repeat", "RepeatDataset", {"tf_record", "repeat_count"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT64}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeMapAndBatchNode("batch", "repeat", "batch_size",
"num_parallel_calls", "drop_remainder"),
data::kInfiniteCardinality),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_TRUE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason))
<< absl::StrJoin(ineligible_reason, ",");
}
TEST(RewriteBatchTest, InfiniteSourceParallelBatch) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("repeat_count", "Const", {}, {{"value", -1}, {"dtype", DT_INT32}}),
NDef("repeat", "RepeatDataset", {"tf_record", "repeat_count"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT64}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeParallelBatchNode("batch", "repeat", "batch_size",
"num_parallel_calls", "drop_remainder",
"true"),
data::kInfiniteCardinality),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_TRUE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason))
<< absl::StrJoin(ineligible_reason, ",");
}
TEST(RewriteBatchTest, FiniteSourceNoDropRemainder) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "tf_record", "batch_size", "drop_remainder",
false),
data::kUnknownCardinality),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_TRUE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason))
<< absl::StrJoin(ineligible_reason, ",");
}
TEST(RewriteBatchTest, FiniteSourceDropRemainder) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "tf_record", "batch_size", "drop_remainder",
false),
1337),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_FALSE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason));
EXPECT_THAT(ineligible_reason,
UnorderedElementsAre("BATCH_DROP_REMAINDER_NOT_INFINITE"));
}
TEST(RewriteBatchTest, UnknownCardinalitySourceDropRemainder) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "tf_record", "batch_size", "drop_remainder",
false),
data::kUnknownCardinality),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_FALSE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason));
EXPECT_THAT(ineligible_reason,
UnorderedElementsAre("BATCH_DROP_REMAINDER_NOT_INFINITE"));
}
TEST(RewriteBatchTest, FiniteSourceDropRemainderUnknown) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "RandomBool", {}, {}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "tf_record", "batch_size", "drop_remainder",
false),
data::kUnknownCardinality),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_FALSE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason));
EXPECT_THAT(ineligible_reason,
UnorderedElementsAre("BATCH_DROP_REMAINDER_UNKNOWN"));
}
TEST(RewriteBatchTest, DropRemainderCardinalityNotAvailable) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {}, {{"value", true}}),
MakeBatchV2Node("batch", "tf_record", "batch_size", "drop_remainder",
false),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_FALSE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason));
EXPECT_THAT(ineligible_reason,
UnorderedElementsAre("BATCH_CARDINALITY_NOT_AVAILABLE"));
}
TEST(RewriteBatchTest, OpNotSupported) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "tf_record", "batch_size", "drop_remainder",
false),
data::kUnknownCardinality),
NDef("take_count", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeTakeNode("take", "batch", "take_count"),
});
FinishItem(&item, "take");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_FALSE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason));
EXPECT_THAT(ineligible_reason,
UnorderedElementsAre("OP_NOT_SUPPORTED_TakeDataset",
"BATCH_DROP_REMAINDER_NOT_INFINITE"));
}
TEST(RewriteBatchTest, BatchNotFound) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
graph_tests_utils::MakeTakeNode("take", "tf_record", "take_count"),
NDef("take_count", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
});
FinishItem(&item, "take");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_FALSE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason));
EXPECT_THAT(ineligible_reason, UnorderedElementsAre("BATCH_NOT_FOUND"));
}
TEST(RewriteBatchTest, InfiniteSourceNoRebatch) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("repeat_count", "Const", {}, {{"value", -1}, {"dtype", DT_INT32}}),
NDef("repeat", "RepeatDataset", {"tf_record", "repeat_count"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "repeat", "batch_size", "drop_remainder",
false),
data::kInfiniteCardinality),
NDef("Sink", "Identity", {"batch"}, {}),
});
item.fetch.push_back("Sink");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_TRUE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason))
<< absl::StrJoin(ineligible_reason, ",");
}
}
}
} |
1,403 | cpp | tensorflow/tensorflow | disable_prefetch_legacy_autotune | tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune.cc | tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_DISABLE_PREFETCH_LEGACY_AUTOTUNE_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_DISABLE_PREFETCH_LEGACY_AUTOTUNE_H_
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
constexpr char kAutotune[] = "autotune";
class DisablePrefetchLegacyAutotune : public TFDataOptimizerBase {
public:
DisablePrefetchLegacyAutotune() = default;
~DisablePrefetchLegacyAutotune() override = default;
string name() const override { return "disable_prefetch_legacy_autotune"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
if (!config) return absl::OkStatus();
const string& autotune = config->parameter_map().at(kAutotune).s();
if (autotune == "true") {
autotune_ = true;
} else if (autotune == "false") {
autotune_ = false;
} else {
return errors::InvalidArgument("Received an invalid value for parameter ",
kAutotune, ": ", autotune);
}
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
private:
bool autotune_ = true;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kLegacyAutotune[] = "legacy_autotune";
constexpr char kPrefetchDataset[] = "PrefetchDataset";
}
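// When autotune is enabled, forces legacy_autotune=false on every PrefetchDataset node
// (nodes already set to false are left alone); does nothing when autotune is off.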
Status DisablePrefetchLegacyAutotune::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization disable_prefetch_legacy_autotune is not "
"applied if autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
for (NodeDef& node : *output->mutable_node()) {
if (node.op() == kPrefetchDataset) {
if (node.attr().find(kLegacyAutotune) == node.attr().end() ||
node.attr().at(kLegacyAutotune).b()) {
(*node.mutable_attr())[kLegacyAutotune].set_b(false);
stats->num_changes++;
}
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(DisablePrefetchLegacyAutotune,
"disable_prefetch_legacy_autotune");
}
} | #include "tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::NDef;
Status OptimizeWithDisablePrefetchLegacyAutotune(const GrapplerItem &item,
GraphDef *output,
bool autotune) {
DisablePrefetchLegacyAutotune optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
class RewriteTest : public ::testing::TestWithParam<bool> {};
TEST_P(RewriteTest, DisablePrefetchLegacyAutotune) {
const bool autotune = GetParam();
GrapplerItem item;
item.graph = test::function::GDef({
NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("prefetch1", "PrefetchDataset", {"range"},
{{"legacy_autotune", true}}),
NDef("prefetch2", "PrefetchDataset", {"prefetch1"},
{{"legacy_autotune", false}}),
NDef("prefetch3", "PrefetchDataset", {"prefetch2"}, {}),
});
GraphDef output;
TF_ASSERT_OK(
OptimizeWithDisablePrefetchLegacyAutotune(item, &output, autotune));
NodeDef prefetch_node1 =
output.node(graph_utils::FindGraphNodeWithName("prefetch1", output));
EXPECT_EQ(prefetch_node1.attr().at("legacy_autotune").b(), !autotune);
NodeDef prefetch_node2 =
output.node(graph_utils::FindGraphNodeWithName("prefetch2", output));
EXPECT_FALSE(prefetch_node2.attr().at("legacy_autotune").b());
NodeDef prefetch_node3 =
output.node(graph_utils::FindGraphNodeWithName("prefetch3", output));
if (autotune) {
EXPECT_FALSE(prefetch_node3.attr().at("legacy_autotune").b());
} else {
EXPECT_TRUE(prefetch_node3.attr().find("legacy_autotune") ==
prefetch_node3.attr().end());
}
}
INSTANTIATE_TEST_SUITE_P(Test, RewriteTest, ::testing::Values(false, true));
}
}
} |
1,404 | cpp | tensorflow/tensorflow | parallel_batch | tensorflow/core/grappler/optimizers/data/parallel_batch.cc | tensorflow/core/grappler/optimizers/data/parallel_batch_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_PARALLEL_BATCH_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_PARALLEL_BATCH_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
class ParallelBatch : public TFDataOptimizerBase {
public:
ParallelBatch() = default;
~ParallelBatch() override = default;
string name() const override { return "parallel_batch"; }
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/parallel_batch.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
namespace tensorflow {
namespace grappler {
Status ParallelBatch::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
for (NodeDef& node : *output->mutable_node()) {
if (node.op() == "BatchDatasetV2" || node.op() == "PaddedBatchDatasetV2") {
(*node.mutable_attr())["parallel_copy"].set_b(true);
stats->num_changes++;
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(ParallelBatch, "parallel_batch");
}
} | #include "tensorflow/core/grappler/optimizers/data/parallel_batch.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(ParallelBatch, BatchDataset) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 5}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
NDef("batch", "BatchDatasetV2",
{"range", "batch_size", "drop_remainder"}, {})});
ParallelBatch optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("batch", output));
int index = graph_utils::FindGraphNodeWithName("batch", output);
EXPECT_TRUE(output.node(index).attr().at("parallel_copy").b());
}
TEST(ParallelBatch, PaddedBatchDataset) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 5}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
NDef("batch", "PaddedBatchDatasetV2",
{"range", "batch_size", "drop_remainder"}, {})});
ParallelBatch optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("batch", output));
int index = graph_utils::FindGraphNodeWithName("batch", output);
EXPECT_TRUE(output.node(index).attr().at("parallel_copy").b());
}
}
}
} |
1,405 | cpp | tensorflow/tensorflow | filter_fusion | tensorflow/core/grappler/optimizers/data/filter_fusion.cc | tensorflow/core/grappler/optimizers/data/filter_fusion_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FILTER_FUSION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FILTER_FUSION_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
class FilterFusion : public TFDataOptimizerBase {
public:
FilterFusion() = default;
~FilterFusion() override = default;
string name() const override { return "filter_fusion"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/filter_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
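// Builds a FilterDataset node that reads the first filter's input and calls the
// fused predicate; Targuments comes from the first filter, output shape/type
// attrs from the second.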
NodeDef MakeFusedFilterNode(const NodeDef& first_filter_node,
const NodeDef& second_filter_node,
const FunctionDef& fused_function,
MutableGraphView* graph) {
NodeDef fused_node;
graph_utils::SetUniqueGraphNodeName("fused_filter", graph->graph(),
&fused_node);
fused_node.set_op("FilterDataset");
fused_node.add_input(first_filter_node.input(0));
auto attr = first_filter_node.attr().at("predicate");
*attr.mutable_func()->mutable_name() = fused_function.signature().name();
(*fused_node.mutable_attr())["predicate"] = std::move(attr);
graph_utils::CopyAttribute("Targuments", first_filter_node, &fused_node);
for (auto key : {"output_shapes", "output_types"})
graph_utils::CopyAttribute(key, second_filter_node, &fused_node);
graph_utils::MaybeSetFusedMetadata(first_filter_node, second_filter_node,
&fused_node);
return fused_node;
}
}
Status FilterFusion::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
GraphDef sorted_old_graph = item.graph;
TF_RETURN_IF_ERROR(TopologicalSort(&sorted_old_graph));
*output = sorted_old_graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
output->library());
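  // Returns the node if it is a FilterDataset with a single input, otherwise
  // nullptr.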
auto get_filter_node = [](const NodeDef& node) -> const NodeDef* {
if (node.op() == "FilterDataset" && node.input_size() == 1) return &node;
return nullptr;
};
auto make_fused_function =
[&](const NodeDef* first_filter_node,
const NodeDef* second_filter_node) -> FunctionDef* {
const auto& parent_fun = first_filter_node->attr().at("predicate");
const FunctionDef* first_func =
function_library.Find(parent_fun.func().name());
const auto& fun = second_filter_node->attr().at("predicate");
const FunctionDef* second_func = function_library.Find(fun.func().name());
if (!fusion_utils::HasSameSignature(first_func->signature(),
second_func->signature())) {
VLOG(1) << "Can't fuse Filters because they have different signature\n";
return nullptr;
}
return fusion_utils::FuseFunctions(
*first_func, *second_func, "fused_predicate",
fusion_utils::SameSignature, fusion_utils::SameInput,
fusion_utils::LazyConjunctionOutput, fusion_utils::LazyConjunctionNodes,
output->mutable_library());
};
for (const NodeDef& node : sorted_old_graph.node()) {
const NodeDef* second_filter_node = get_filter_node(node);
if (!second_filter_node) continue;
const NodeDef* first_filter_node =
get_filter_node(*graph_utils::GetInputNode(*second_filter_node, graph));
if (!first_filter_node) continue;
const auto* fused_predicate =
make_fused_function(first_filter_node, second_filter_node);
if (!fused_predicate) continue;
const auto* fused_filter_node = graph.AddNode(MakeFusedFilterNode(
*first_filter_node, *second_filter_node, *fused_predicate, &graph));
TF_RETURN_IF_ERROR(graph.UpdateFanouts(second_filter_node->name(),
fused_filter_node->name()));
TF_RETURN_IF_ERROR(function_library.AddFunctionDef(*fused_predicate));
nodes_to_delete.insert(first_filter_node->name());
nodes_to_delete.insert(second_filter_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(FilterFusion, "filter_fusion");
}
} | #include "tensorflow/core/grappler/optimizers/data/filter_fusion.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using graph_tests_utils::MakeFilterNode;
TEST(FilterFusionTest, FuseTwoFilterIntoOne) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter1", "range"),
MakeFilterNode("filter2", "filter1")},
{
test::function::IsZero(),
});
FilterFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter2", output));
}
TEST(FilterFusionTest, FuseThreeNodesIntoOne) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter1", "range"), MakeFilterNode("filter2", "filter1"),
MakeFilterNode("filter3", "filter2"),
NDef("cache", "CacheDataset", {"filter3", "filename"}, {})},
{
test::function::IsZero(),
});
FilterFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter2", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter3", output));
}
}
}
} |
1,406 | cpp | tensorflow/tensorflow | map_parallelization | tensorflow/core/grappler/optimizers/data/map_parallelization.cc | tensorflow/core/grappler/optimizers/data/map_parallelization_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAP_PARALLELIZATION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAP_PARALLELIZATION_H_
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
constexpr char kAutotune[] = "autotune";
class MapParallelization : public TFDataOptimizerBase {
public:
MapParallelization() = default;
~MapParallelization() override = default;
string name() const override { return "map_parallelization"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
if (!config) return absl::OkStatus();
const string& autotune = config->parameter_map().at(kAutotune).s();
if (autotune == "true") {
autotune_ = true;
} else if (autotune == "false") {
autotune_ = false;
} else {
return errors::InvalidArgument("Received an invalid value for parameter ",
kAutotune, ": ", autotune);
}
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
private:
bool autotune_ = true;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/map_parallelization.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kMapDataset[] = "MapDataset";
constexpr char kParallelMapDataset[] = "ParallelMapDatasetV2";
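// Clones the MapDataset node `name` as a ParallelMapDatasetV2 node with an
// autotuned num_parallel_calls input, deterministic execution, and without the
// force_synchronous attr.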
NodeDef MakeParallelMap(const string& name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << name
<< " in the optimized graph.";
NodeDef parallel_map = graph->graph()->node(index);
graph_utils::SetUniqueGraphNodeName(kParallelMapDataset, graph->graph(),
¶llel_map);
parallel_map.set_op(kParallelMapDataset);
auto* num_parallel_calls = graph_utils::AddScalarConstNode(
static_cast<int64_t>(data::model::kAutotune), graph);
parallel_map.add_input(num_parallel_calls->name());
parallel_map.mutable_attr()->erase("force_synchronous");
AddNodeAttr("deterministic", "true", ¶llel_map);
return parallel_map;
}
}
Status MapParallelization::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization map_parallelization is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
auto get_map_node = [](const NodeDef& node) -> const NodeDef* {
if (node.op() == kMapDataset) return &node;
return nullptr;
};
for (const NodeDef& node : item.graph.node()) {
const NodeDef* map_node = get_map_node(node);
if (!map_node) continue;
auto* function =
function_library.Find(map_node->attr().at("f").func().name());
if (function_utils::IsFunctionStateful(function_library, *function, true) ||
(map_node->attr().contains("force_synchronous") &&
map_node->attr().at("force_synchronous").b())) {
continue;
}
auto* parallel_map =
graph.AddNode(MakeParallelMap(map_node->name(), &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(map_node->name(), parallel_map->name()));
nodes_to_delete.insert(map_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MapParallelization, "map_parallelization");
}
} | #include "tensorflow/core/grappler/optimizers/data/map_parallelization.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithMapParallelization(const GrapplerItem& item,
GraphDef* output, bool autotune) {
MapParallelization optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
using graph_tests_utils::MakeMapNode;
const char stateless_fun_name[] = "XTimesTwo";
const char stateful_fun_name[] = "RandomUniformFn";
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, MapParallelizationTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map", "range", stateless_fun_name),
NDef("Sink", "Identity", {"map"}, {})},
{
test::function::XTimesTwo(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapParallelization(item, &output, autotune));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output),
autotune);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("map", output), !autotune);
}
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
class FromFunctionDef : public ::testing::TestWithParam<string> {};
TEST_P(FromFunctionDef, MapParallelizationTest) {
const string op = GetParam();
bool from_function_def = (op == "_Retval");
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map", "range", stateless_fun_name),
NDef("Sink", op, {"map"}, {})},
{
test::function::XTimesTwo(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapParallelization(item, &output, true));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output),
!from_function_def);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("map", output),
from_function_def);
}
INSTANTIATE_TEST_SUITE_P(Test, FromFunctionDef,
::testing::Values("Identity", "_Retval"));
TEST(ParallelizeAssert, MapParallelizationTest) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map1", "range", stateful_fun_name),
MakeMapNode("map2", "map1", stateless_fun_name),
NDef("cache", "CacheDataset", {"map2", "filename"}, {}),
NDef("Sink", "Identity", {"cache"}, {})},
{
test::function::XTimesTwo(),
test::function::RandomUniform(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapParallelization(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map2", output));
}
}
}
} |
1,407 | cpp | tensorflow/tensorflow | make_sloppy | tensorflow/core/grappler/optimizers/data/make_sloppy.cc | tensorflow/core/grappler/optimizers/data/make_sloppy_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAKE_SLOPPY_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAKE_SLOPPY_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
class MakeSloppy : public TFDataOptimizerBase {
public:
MakeSloppy() = default;
~MakeSloppy() override = default;
string name() const override { return "make_sloppy"; }
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/make_sloppy.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
namespace tensorflow {
namespace grappler {
Status MakeSloppy::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
for (NodeDef& node : *output->mutable_node()) {
if (graph_utils::HasSloppyAttr(node.op())) {
(*node.mutable_attr())["sloppy"].set_b(true);
stats->num_changes++;
}
if (graph_utils::HasDeterministicAttr(node.op()) &&
node.attr().at("deterministic").s() == "default") {
(*node.mutable_attr())["deterministic"].set_s("false");
stats->num_changes++;
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MakeSloppy, "make_sloppy");
}
} | #include "tensorflow/core/grappler/optimizers/data/make_sloppy.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(MakeSloppy, ParallelInterleave) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV2Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", "XTimesTwo", false)},
{
test::function::XTimesTwo(),
});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("interleave", output));
int index = graph_utils::FindGraphNodeWithName("interleave", output);
EXPECT_TRUE(output.node(index).attr().at("sloppy").b());
}
TEST(MakeSloppy, ParallelMap) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapNode("map", "range",
"num_parallel_calls", "XTimesTwo",
false)},
{
test::function::XTimesTwo(),
});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map", output));
int index = graph_utils::FindGraphNodeWithName("map", output);
EXPECT_TRUE(output.node(index).attr().at("sloppy").b());
}
TEST(MakeSloppy, ParseExampleDataset) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParseExampleNode("parse_example", "range",
"num_parallel_calls",
false)},
{});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("parse_example", output));
int index = graph_utils::FindGraphNodeWithName("parse_example", output);
EXPECT_TRUE(output.node(index).attr().at("sloppy").b());
}
TEST(ChangeDefault, ParallelInterleave) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV4Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", "XTimesTwo", "default")},
{
test::function::XTimesTwo(),
});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("interleave", output));
int index = graph_utils::FindGraphNodeWithName("interleave", output);
EXPECT_EQ(output.node(index).attr().at("deterministic").s(), "false");
}
TEST(ChangeDefault, ParallelMap) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", "XTimesTwo",
"default")},
{
test::function::XTimesTwo(),
});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map", output));
int index = graph_utils::FindGraphNodeWithName("map", output);
EXPECT_EQ(output.node(index).attr().at("deterministic").s(), "false");
}
TEST(ChangeDefault, ParallelBatch) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeParallelBatchNode(
"batch", "range", "batch_size", "num_parallel_calls",
"drop_remainder", "default")},
{});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("batch", output));
int index = graph_utils::FindGraphNodeWithName("batch", output);
EXPECT_EQ(output.node(index).attr().at("deterministic").s(), "false");
}
}
}
} |
1,408 | cpp | tensorflow/tensorflow | graph_utils | tensorflow/core/grappler/optimizers/data/graph_utils.cc | tensorflow/core/grappler/optimizers/data/graph_utils_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_GRAPH_UTILS_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_GRAPH_UTILS_H_
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace grappler {
namespace graph_utils {
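// Returns the index of the first element in `collection` that fulfills
// `predicate`, or -1 if no such element exists.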
template <typename Predicate, typename Collection>
int GetFirstElementIndexWithPredicate(const Predicate& predicate,
const Collection& collection) {
unsigned idx = 0;
for (auto&& element : collection) {
if (predicate(element)) {
return idx;
}
idx++;
}
return -1;
}
NodeDef* AddNode(StringPiece name, StringPiece op,
const std::vector<string>& inputs,
const std::vector<std::pair<string, AttrValue>>& attributes,
MutableGraphView* graph);
NodeDef* AddScalarPlaceholder(DataType dtype, MutableGraphView* graph);
template <typename T>
NodeDef* AddScalarConstNode(T v, MutableGraphView* graph) {
static_assert(!std::is_same<T, T>::value,
"Invalid specialization of this method for type T.");
return {};
}
template <>
NodeDef* AddScalarConstNode(bool v, MutableGraphView* graph);
template <>
NodeDef* AddScalarConstNode(double v, MutableGraphView* graph);
template <>
NodeDef* AddScalarConstNode(float v, MutableGraphView* graph);
template <>
NodeDef* AddScalarConstNode(int v, MutableGraphView* graph);
template <>
NodeDef* AddScalarConstNode(int64_t v, MutableGraphView* graph);
template <>
NodeDef* AddScalarConstNode(StringPiece v, MutableGraphView* graph);
template <typename T>
Status GetScalarConstNodeValue(const NodeDef& node, T* value) {
static_assert(!std::is_same<T, T>::value,
"Invalid specialization of this method fo rtype T.");
}
template <>
Status GetScalarConstNodeValue(const NodeDef& node, int64_t* value);
template <>
Status GetScalarConstNodeValue(const NodeDef& node, bool* value);
bool Compare(const GraphDef& g1, const GraphDef& g2);
bool ContainsGraphNodeWithName(StringPiece name, const GraphDef& graph);
bool ContainsGraphFunctionWithName(StringPiece name,
const FunctionDefLibrary& library);
bool ContainsNodeWithOp(StringPiece op, const GraphDef& graph);
int FindGraphNodeWithName(StringPiece name, const GraphDef& graph);
int FindGraphFunctionWithName(StringPiece name,
const FunctionDefLibrary& library);
int FindGraphNodeWithOp(StringPiece op, const GraphDef& graph);
NodeDef* GetInputNode(const NodeDef& node, const MutableGraphView& graph);
NodeDef* GetInputNode(const NodeDef& node, const MutableGraphView& graph,
int64_t i);
Status GetDatasetOutputTypesAttr(const NodeDef& node,
DataTypeVector* output_types);
std::vector<int> FindAllGraphNodesWithOp(const string& op,
const GraphDef& graph);
void SetUniqueGraphNodeName(StringPiece prefix, GraphDef* graph, NodeDef* node);
void SetUniqueGraphFunctionName(StringPiece prefix,
const FunctionDefLibrary* library,
FunctionDef* function);
void CopyAttribute(const string& attribute_name, const NodeDef& from,
NodeDef* to_node);
void ConcatAttributeList(const string& attribute_name, const NodeDef& first,
const NodeDef& second, NodeDef* to_node);
Status EnsureNodeNamesUnique(Graph* g);
Status GetFetchNode(const MutableGraphView& graph, const GrapplerItem& item,
NodeDef** fetch_node);
bool IsItemDerivedFromFunctionDef(const GrapplerItem& item,
const MutableGraphView& graph_view);
void MaybeSetFusedMetadata(const NodeDef& node1, const NodeDef& node2,
NodeDef* fused_node);
bool CopyShapesAndTypesAttrs(const NodeDef& from, NodeDef* to_node);
bool HasSloppyAttr(const string& op);
bool HasReplicateOnSplitAttr(const string& op);
bool HasDeterministicAttr(const string& op);
Status SetMetadataName(const std::string& name, NodeDef* node);
}
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include <cstddef>
#include "tensorflow/core/framework/dataset_metadata.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
namespace grappler {
namespace graph_utils {
namespace {
constexpr char kConstOpName[] = "Const";
constexpr char kRetValOp[] = "_Retval";
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
constexpr char kToutputTypes[] = "Toutput_types";
template <typename Predicate, typename Collection>
std::vector<int> GetElementIndicesWithPredicate(const Predicate& predicate,
const Collection& collection) {
std::vector<int> indices = {};
unsigned idx = 0;
for (auto&& element : collection) {
if (predicate(element)) {
indices.push_back(idx);
}
idx++;
}
return indices;
}
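// Returns graph node indices ordered by node name, so two graphs can be
// compared independently of node order.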
std::vector<int> CreateNameIndex(const GraphDef& graph) {
std::map<string, int> names;
for (int i = 0; i < graph.node_size(); ++i) {
names[graph.node(i).name()] = i;
}
std::vector<int> index(graph.node_size());
int i = 0;
for (const auto& pair : names) {
index[i++] = pair.second;
}
return index;
}
std::vector<int> CreateInputIndex(const NodeDef& node) {
std::map<string, int> inputs;
for (int i = 0; i < node.input_size(); ++i) {
inputs[node.input(i)] = i;
}
std::vector<int> index(node.input_size());
int i = 0;
for (const auto& pair : inputs) {
index[i++] = pair.second;
}
return index;
}
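// Adds a scalar Const node of `dtype` to the graph; `add_value` fills in the
// tensor value.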
NodeDef* AddScalarConstNodeHelper(
DataType dtype, const std::function<void(TensorProto*)>& add_value,
MutableGraphView* graph) {
NodeDef node;
node.set_op(kConstOpName);
SetUniqueGraphNodeName(kConstOpName, graph->graph(), &node);
(*node.mutable_attr())["dtype"].set_type(dtype);
std::unique_ptr<tensorflow::TensorProto> tensor =
std::make_unique<tensorflow::TensorProto>();
std::unique_ptr<tensorflow::TensorShapeProto> tensor_shape =
std::make_unique<tensorflow::TensorShapeProto>();
tensor->set_allocated_tensor_shape(tensor_shape.release());
tensor->set_dtype(dtype);
add_value(tensor.get());
(*node.mutable_attr())["value"].set_allocated_tensor(tensor.release());
return graph->AddNode(std::move(node));
}
}
NodeDef* AddScalarPlaceholder(DataType dtype, MutableGraphView* graph) {
NodeDef node;
node.set_op("Placeholder");
SetUniqueGraphNodeName(node.op(), graph->graph(), &node);
(*node.mutable_attr())["dtype"].set_type(dtype);
TensorShapeProto* shape = (*node.mutable_attr())["shape"].mutable_shape();
shape->set_unknown_rank(false);
return graph->AddNode(std::move(node));
}
NodeDef* AddNode(StringPiece name, StringPiece op,
const std::vector<string>& inputs,
const std::vector<std::pair<string, AttrValue>>& attributes,
MutableGraphView* graph) {
NodeDef node;
if (!name.empty()) {
node.set_name(string(name));
} else {
SetUniqueGraphNodeName(op, graph->graph(), &node);
}
node.set_op(string(op));
for (const string& input : inputs) {
node.add_input(input);
}
for (const auto& attr : attributes) {
(*node.mutable_attr())[attr.first] = attr.second;
}
return graph->AddNode(std::move(node));
}
template <>
NodeDef* AddScalarConstNode(bool v, MutableGraphView* graph) {
return AddScalarConstNodeHelper(
DT_BOOL, [v](TensorProto* proto) { proto->add_bool_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(double v, MutableGraphView* graph) {
return AddScalarConstNodeHelper(
DT_DOUBLE, [v](TensorProto* proto) { proto->add_double_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(float v, MutableGraphView* graph) {
return AddScalarConstNodeHelper(
DT_FLOAT, [v](TensorProto* proto) { proto->add_float_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(int v, MutableGraphView* graph) {
return AddScalarConstNodeHelper(
DT_INT32, [v](TensorProto* proto) { proto->add_int_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(int64_t v, MutableGraphView* graph) {
return AddScalarConstNodeHelper(
DT_INT64, [v](TensorProto* proto) { proto->add_int64_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(StringPiece v, MutableGraphView* graph) {
return AddScalarConstNodeHelper(
DT_STRING,
[v](TensorProto* proto) { proto->add_string_val(v.data(), v.size()); },
graph);
}
Status GetScalarConstNodeValueHelper(
const NodeDef& node, DataType dtype,
const std::function<void(const Tensor&)>& get_value) {
if (node.op() != kConstOpName)
return errors::InvalidArgument("Node ", node.name(),
" is not a Const node. Op: ", node.op());
Tensor tensor;
TF_RETURN_IF_ERROR(GetNodeAttr(node, "value", &tensor));
if (!TensorShapeUtils::IsScalar(tensor.shape())) {
return errors::InvalidArgument(
"Node ", node.name(),
" should be a scalar but has shape: ", tensor.shape());
}
if (tensor.dtype() != dtype) {
return errors::InvalidArgument(
"Node ", node.name(), " should have type ", DataTypeString(dtype),
" but has type: ", DataTypeString(tensor.dtype()));
}
get_value(tensor);
return absl::OkStatus();
}
template <>
Status GetScalarConstNodeValue(const NodeDef& node, int64_t* value) {
return GetScalarConstNodeValueHelper(
node, DT_INT64,
[value](const Tensor& tensor) { *value = tensor.scalar<int64_t>()(); });
}
template <>
Status GetScalarConstNodeValue(const NodeDef& node, bool* value) {
return GetScalarConstNodeValueHelper(
node, DT_BOOL,
[value](const Tensor& tensor) { *value = tensor.scalar<bool>()(); });
}
bool Compare(const GraphDef& g1, const GraphDef& g2) {
if (g1.node_size() != g2.node_size()) {
return false;
}
std::vector<int> name_index1 = CreateNameIndex(g1);
std::vector<int> name_index2 = CreateNameIndex(g2);
for (int i = 0; i < g1.node_size(); ++i) {
int idx1 = name_index1[i];
int idx2 = name_index2[i];
if (g1.node(idx1).op() != g2.node(idx2).op()) {
return false;
}
if (g1.node(idx1).name() != g2.node(idx2).name()) {
return false;
}
if (g1.node(idx1).input_size() != g2.node(idx2).input_size()) {
return false;
}
std::vector<int> input_index1 = CreateInputIndex(g1.node(idx1));
std::vector<int> input_index2 = CreateInputIndex(g2.node(idx2));
for (int j = 0; j < g1.node(idx1).input_size(); ++j) {
if (!IsSameInput(g1.node(idx1).input(input_index1[j]),
g2.node(idx2).input(input_index2[j]))) {
return false;
}
}
}
return true;
}
bool ContainsGraphFunctionWithName(StringPiece name,
const FunctionDefLibrary& library) {
return FindGraphFunctionWithName(name, library) != -1;
}
bool ContainsGraphNodeWithName(StringPiece name, const GraphDef& graph) {
return FindGraphNodeWithName(name, graph) != -1;
}
bool ContainsNodeWithOp(StringPiece op, const GraphDef& graph) {
return FindGraphNodeWithOp(op, graph) != -1;
}
int FindGraphFunctionWithName(StringPiece name,
const FunctionDefLibrary& library) {
return GetFirstElementIndexWithPredicate(
[&name](const FunctionDef& function) {
return function.signature().name() == name;
},
library.function());
}
int FindGraphNodeWithName(StringPiece name, const GraphDef& graph) {
return GetFirstElementIndexWithPredicate(
[&name](const NodeDef& node) { return node.name() == name; },
graph.node());
}
int FindGraphNodeWithOp(StringPiece op, const GraphDef& graph) {
return GetFirstElementIndexWithPredicate(
[&op](const NodeDef& node) { return node.op() == op; }, graph.node());
}
std::vector<int> FindAllGraphNodesWithOp(const string& op,
const GraphDef& graph) {
return GetElementIndicesWithPredicate(
[&op](const NodeDef& node) { return node.op() == op; }, graph.node());
}
NodeDef* GetInputNode(const NodeDef& node, const MutableGraphView& graph) {
if (node.input_size() == 0) return nullptr;
MutableGraphView::InputPort input_port = graph.GetInputPort(node.name(), 0);
return graph.GetRegularFanin(input_port).node;
}
NodeDef* GetInputNode(const NodeDef& node, const MutableGraphView& graph,
int64_t i) {
if (node.input_size() <= i) return nullptr;
MutableGraphView::InputPort input_port = graph.GetInputPort(node.name(), i);
return graph.GetRegularFanin(input_port).node;
}
Status GetDatasetOutputTypesAttr(const NodeDef& node,
DataTypeVector* output_types) {
for (const string& attr_name : {"output_types", "Toutput_types"}) {
if (node.attr().contains(attr_name)) {
return GetNodeAttr(node, attr_name, output_types);
}
}
return errors::InvalidArgument("Could not find output_types attr for node: ",
node.name(), " with op: ", node.op());
}
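// Gives `node` a name that is unique in `graph`, derived from `prefix`. If the
// name ends in "_generated", the uniquifying suffix is inserted before that
// token so the marker stays at the end.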
void SetUniqueGraphNodeName(StringPiece prefix, GraphDef* graph,
NodeDef* node) {
string name = string(prefix);
int id = graph->node_size();
while (ContainsGraphNodeWithName(name, *graph)) {
if (name.rfind("_generated") != string::npos &&
(name.rfind("_generated") == (name.size() - strlen("_generated")))) {
name.insert(name.rfind("_generated"), strings::StrCat("/_", id));
} else {
name = strings::StrCat(prefix, "/_", id);
}
++id;
}
node->set_name(std::move(name));
}
void SetUniqueGraphFunctionName(StringPiece prefix,
const FunctionDefLibrary* library,
FunctionDef* function) {
string name = string(prefix);
int id = library->function_size();
while (ContainsGraphFunctionWithName(name, *library)) {
name = strings::StrCat(prefix, "/_", id);
++id;
}
function->mutable_signature()->set_name(std::move(name));
}
void CopyAttribute(const string& attribute_name, const NodeDef& from,
NodeDef* to_node) {
(*to_node->mutable_attr())[attribute_name] = from.attr().at(attribute_name);
}
void ConcatAttributeList(const string& attribute_name, const NodeDef& first,
const NodeDef& second, NodeDef* to_node) {
CopyAttribute(attribute_name, first, to_node);
(*to_node->mutable_attr())
.at(attribute_name)
.mutable_list()
->MergeFrom(second.attr().at(attribute_name).list());
}
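// Renames nodes with duplicate names by appending an increasing numeric suffix
// until every name in the graph is unique.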
Status EnsureNodeNamesUnique(Graph* g) {
std::unordered_map<string, int> name_map;
for (auto node : g->op_nodes()) {
const string& prefix = node->name();
if (auto entry = gtl::FindOrNull(name_map, prefix)) {
string unique_name;
do {
unique_name = strings::StrCat(prefix, "_", ++(*entry));
} while (name_map.find(unique_name) != name_map.end());
name_map.insert({unique_name, 0});
node->set_name(std::move(unique_name));
} else {
name_map.insert({node->name(), 0});
}
}
return absl::OkStatus();
}
Status GetFetchNode(const MutableGraphView& graph, const GrapplerItem& item,
NodeDef** fetch_node) {
if (item.fetch.size() != 1) {
return errors::InvalidArgument(
"Expected only one fetch node but there were ", item.fetch.size(), ": ",
absl::StrJoin(item.fetch, ", "));
}
*fetch_node = graph.GetNode(item.fetch.at(0));
return absl::OkStatus();
}
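// Returns true iff every fetch node found in the graph is a _Retval op, which
// indicates the item was derived from a FunctionDef.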
bool IsItemDerivedFromFunctionDef(const GrapplerItem& item,
const MutableGraphView& graph_view) {
for (const auto& fetch_name : item.fetch) {
auto fetch = graph_view.GetNode(fetch_name);
if (fetch != nullptr && fetch->op() != kRetValOp) {
return false;
}
}
return true;
}
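// Combines the metadata names of `node1` and `node2` into a
// "fused(name1,name2)" metadata name on `fused_node`, substituting "?" for
// missing names.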
void MaybeSetFusedMetadata(const NodeDef& node1, const NodeDef& node2,
NodeDef* fused_node) {
data::Metadata metadata1;
if (node1.attr().contains("metadata")) {
metadata1.ParseFromString(node1.attr().at("metadata").s());
}
data::Metadata metadata2;
if (node2.attr().contains("metadata")) {
metadata2.ParseFromString(node2.attr().at("metadata").s());
}
data::Metadata fused_metadata;
auto normalize_name = [](const string& name) {
return name.empty() ? "?" : name;
};
*fused_metadata.mutable_name() =
strings::StrCat("fused(", normalize_name(metadata1.name()), ",",
normalize_name(metadata2.name()), ")");
fused_metadata.SerializeToString(
(*fused_node->mutable_attr())["metadata"].mutable_s());
}
bool CopyShapesAndTypesAttrs(const NodeDef& from, NodeDef* to_node) {
auto* attr = gtl::FindOrNull(from.attr(), kOutputTypes);
attr = (attr == nullptr ? gtl::FindOrNull(from.attr(), kToutputTypes) : attr);
if (attr == nullptr) return false;
(*to_node->mutable_attr())[kOutputTypes] = *attr;
attr = gtl::FindOrNull(from.attr(), kOutputShapes);
if (attr == nullptr) return false;
(*to_node->mutable_attr())[kOutputShapes] = *attr;
return true;
}
namespace {
const auto* kSloppyAttrOps = new absl::flat_hash_set<string>{
"ParallelInterleaveDatasetV2",
"ParallelMapDataset",
"ParseExampleDataset",
};
const auto* kReplicateOnSplitAttrOps = new absl::flat_hash_set<string>{
"TensorSliceDataset",
"RangeDataset",
};
const auto* kDeterministicAttrOps = new absl::flat_hash_set<string>{
"LegacyParallelInterleaveDatasetV2",
"ParallelInterleaveDatasetV3",
"ParallelInterleaveDatasetV4",
"ParallelMapDatasetV2",
"ParallelBatchDataset",
};
}
bool HasSloppyAttr(const string& op) { return kSloppyAttrOps->contains(op); }
bool HasReplicateOnSplitAttr(const string& op) {
return kReplicateOnSplitAttrOps->contains(op);
}
bool HasDeterministicAttr(const string& op) {
return kDeterministicAttrOps->contains(op);
}
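// Records `name` in the node's data::Metadata attr; returns an error if the
// node already has a metadata name.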
Status SetMetadataName(const std::string& name, NodeDef* node) {
data::Metadata metadata;
if (node->attr().contains("metadata")) {
metadata.ParseFromString(node->attr().at("metadata").s());
}
if (!metadata.name().empty()) {
return errors::InvalidArgument("Node ", node->name(),
" already has a metadata name \"",
metadata.name(), "\".");
}
*metadata.mutable_name() = name;
metadata.SerializeToString((*node->mutable_attr())["metadata"].mutable_s());
return absl::OkStatus();
}
}
}
} | #include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/framework/dataset_metadata.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace graph_utils {
namespace {
using test::function::NDef;
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
constexpr char kToutputTypes[] = "Toutput_types";
TEST(GraphUtilsTest, GetFirstElementIndexWithPredicate) {
std::vector<int> vec({1, 2, 3, 4, 5, 6});
auto result = GetFirstElementIndexWithPredicate(
[](int elem) { return elem % 3 == 0; }, vec);
EXPECT_EQ(result, 2);
result = GetFirstElementIndexWithPredicate(
[](int elem) { return elem % 7 == 0; }, vec);
EXPECT_EQ(result, -1);
}
TEST(GraphUtilsTest, AddScalarConstNodeBool) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* bool_node = AddScalarConstNode<bool>(true, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(bool_node->name(), *graph.graph()));
EXPECT_EQ(bool_node->attr().at("value").tensor().bool_val(0), true);
}
TEST(GraphUtilsTest, AddScalarConstNodeDouble) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* double_node = AddScalarConstNode<double>(3.14, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(double_node->name(), *graph.graph()));
EXPECT_FLOAT_EQ(double_node->attr().at("value").tensor().double_val(0), 3.14);
}
TEST(GraphUtilsTest, AddScalarConstNodeFloat) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* float_node = AddScalarConstNode<float>(3.14, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(float_node->name(), *graph.graph()));
EXPECT_FLOAT_EQ(float_node->attr().at("value").tensor().float_val(0), 3.14);
}
TEST(GraphUtilsTest, AddScalarConstNodeInt) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* int_node = AddScalarConstNode<int>(42, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(int_node->name(), *graph.graph()));
EXPECT_EQ(int_node->attr().at("value").tensor().int_val(0), 42);
}
TEST(GraphUtilsTest, AddScalarConstNodeInt64) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* int64_node = AddScalarConstNode<int64_t>(42, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(int64_node->name(), *graph.graph()));
EXPECT_EQ(int64_node->attr().at("value").tensor().int64_val(0), 42);
}
TEST(GraphUtilsTest, AddScalarConstNodeString) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* string_node = AddScalarConstNode<StringPiece>("hello", &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(string_node->name(), *graph.graph()));
EXPECT_EQ(string_node->attr().at("value").tensor().string_val(0), "hello");
}
TEST(GraphUtilsTest, GetScalarConstNodeInt64) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* int64_node = AddScalarConstNode<int64_t>(128, &graph);
int64_t result;
EXPECT_TRUE(GetScalarConstNodeValue<int64_t>(*int64_node, &result).ok());
EXPECT_EQ(result, 128);
}
TEST(GraphUtilsTest, GetScalarConstNodeBool) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* bool_node = AddScalarConstNode<bool>(true, &graph);
bool result;
EXPECT_TRUE(GetScalarConstNodeValue<bool>(*bool_node, &result).ok());
EXPECT_EQ(result, true);
}
TEST(GraphUtilsTest, GetScalarConstNodeErrorWithNonConst) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* non_const = AddScalarPlaceholder(DT_INT64, &graph);
int64_t result;
Status s = GetScalarConstNodeValue<int64_t>(*non_const, &result);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Node Placeholder is not a Const node. Op: Placeholder");
}
TEST(GraphUtilsTest, GetScalarConstNodeErrorWithType) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* int64_node = AddScalarConstNode<int64_t>(128, &graph);
bool result;
Status s = GetScalarConstNodeValue<bool>(*int64_node, &result);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Node Const should have type bool but has type: int64");
}
TEST(GraphUtilsTest, GetScalarConstNodeErrorWithVector) {
NodeDef node;
node.set_name("Const");
node.set_op("Const");
(*node.mutable_attr())["dtype"].set_type(DT_INT64);
auto tensor = (*node.mutable_attr())["value"].mutable_tensor();
tensor->set_dtype(DT_INT64);
tensor->mutable_tensor_shape()->mutable_dim()->Add()->set_size(1);
tensor->add_int64_val(128);
int64_t result;
Status s = GetScalarConstNodeValue<int64_t>(node, &result);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(), "Node Const should be a scalar but has shape: [1]");
}
TEST(GraphUtilsTest, Compare) {
GraphDef graph_def_a;
MutableGraphView graph_a(&graph_def_a);
GraphDef graph_def_b;
MutableGraphView graph_b(&graph_def_b);
EXPECT_TRUE(Compare(graph_def_a, graph_def_b));
AddNode("A", "OpA", {}, {}, &graph_a);
AddNode("B", "OpB", {"A"}, {}, &graph_a);
EXPECT_FALSE(Compare(graph_def_a, graph_def_b));
graph_def_b.mutable_node()->CopyFrom(graph_def_a.node());
EXPECT_TRUE(Compare(graph_def_a, graph_def_b));
}
TEST(GraphUtilsTest, ContainsGraphNodeWithName) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_TRUE(!ContainsGraphNodeWithName("A", *graph.graph()));
AddNode("A", "OpA", {}, {}, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName("A", *graph.graph()));
EXPECT_TRUE(graph.DeleteNodes({"A"}).ok());
EXPECT_TRUE(!ContainsGraphNodeWithName("A", *graph.graph()));
}
TEST(GraphUtilsTest, ContainsGraphFunctionWithName) {
FunctionDefLibrary library;
EXPECT_FALSE(ContainsGraphFunctionWithName("new_function", library));
FunctionDef* new_function = library.add_function();
SetUniqueGraphFunctionName("new_function", &library, new_function);
EXPECT_TRUE(
ContainsGraphFunctionWithName(new_function->signature().name(), library));
}
TEST(GraphUtilsTest, ContainsNodeWithOp) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_TRUE(!ContainsNodeWithOp("OpA", *graph.graph()));
AddNode("A", "OpA", {}, {}, &graph);
EXPECT_TRUE(ContainsNodeWithOp("OpA", *graph.graph()));
EXPECT_TRUE(graph.DeleteNodes({"A"}).ok());
EXPECT_TRUE(!ContainsNodeWithOp("OpA", *graph.graph()));
}
TEST(GraphUtilsTest, FindGraphNodeWithName) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_EQ(FindGraphNodeWithName("A", *graph.graph()), -1);
AddNode("A", "OpA", {}, {}, &graph);
EXPECT_NE(FindGraphNodeWithName("A", *graph.graph()), -1);
EXPECT_TRUE(graph.DeleteNodes({"A"}).ok());
EXPECT_EQ(FindGraphNodeWithName("A", *graph.graph()), -1);
}
TEST(GraphUtilsTest, FindGraphFunctionWithName) {
FunctionDefLibrary library;
EXPECT_EQ(FindGraphFunctionWithName("new_function", library), -1);
FunctionDef* new_function = library.add_function();
SetUniqueGraphFunctionName("new_function", &library, new_function);
EXPECT_NE(
FindGraphFunctionWithName(new_function->signature().name(), library), -1);
}
TEST(GraphUtilsTest, FindGraphNodeWithOp) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_EQ(FindGraphNodeWithOp("OpA", *graph.graph()), -1);
AddNode("A", "OpA", {}, {}, &graph);
AddNode("B", "OpB", {"A"}, {}, &graph);
AddNode("A2", "OpA", {"A"}, {}, &graph);
EXPECT_EQ(FindGraphNodeWithOp("OpA", *graph.graph()), 0);
EXPECT_TRUE(graph.DeleteNodes({"B"}).ok());
EXPECT_EQ(FindGraphNodeWithOp("OpB", *graph.graph()), -1);
EXPECT_EQ(FindGraphNodeWithName("A2", *graph.graph()), 1);
}
TEST(GraphUtilsTest, FindAllGraphNodesWithOp) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_EQ(FindGraphNodeWithOp("OpA", *graph.graph()), -1);
AddNode("A", "OpA", {}, {}, &graph);
AddNode("B", "OpB", {"A"}, {}, &graph);
AddNode("A2", "OpA", {"B"}, {}, &graph);
std::vector<int> result_indices =
FindAllGraphNodesWithOp("OpA", *graph.graph());
EXPECT_EQ(result_indices.size(), 2);
EXPECT_EQ(result_indices.at(0), 0);
EXPECT_EQ(result_indices.at(1), 2);
EXPECT_TRUE(graph.DeleteNodes({"A2"}).ok());
std::vector<int> result_indices_new =
FindAllGraphNodesWithOp("OpA", *graph.graph());
EXPECT_EQ(result_indices_new.size(), 1);
EXPECT_EQ(result_indices_new.at(0), 0);
}
TEST(GraphUtilsTest, SetUniqueGraphNodeName) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* node1 = AddNode("", "A", {}, {}, &graph);
NodeDef* node2 = AddNode("", "A", {}, {}, &graph);
EXPECT_NE(node1->name(), node2->name());
EXPECT_TRUE(graph.DeleteNodes({node1->name()}).ok());
NodeDef* node3 = AddNode("", "A", {}, {}, &graph);
EXPECT_NE(node2->name(), node3->name());
}
TEST(GraphUtilsTest, SetUniqueGraphFunctionName) {
FunctionDefLibrary library;
FunctionDef* new_function = library.add_function();
SetUniqueGraphFunctionName("new_function", &library, new_function);
FunctionDef* other_function = library.add_function();
SetUniqueGraphFunctionName("new_function", &library, other_function);
EXPECT_NE(new_function->signature().name(),
other_function->signature().name());
}
TEST(GraphUtilsTest, GetInputNode) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* node1 = AddNode("", "A", {}, {}, &graph);
NodeDef* node2 = AddNode("", "A", {node1->name()}, {}, &graph);
EXPECT_EQ(GetInputNode(*node2, graph), node1);
EXPECT_EQ(GetInputNode(*node1, graph), nullptr);
}
TEST(GraphUtilsTest, GetIthInputNode) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* node1 = AddNode("", "A", {}, {}, &graph);
NodeDef* node2 = AddNode("", "A", {}, {}, &graph);
NodeDef* node3 = AddNode("", "A", {node1->name(), node2->name()}, {}, &graph);
EXPECT_EQ(GetInputNode(*node3, graph), node1);
EXPECT_EQ(GetInputNode(*node3, graph, 1), node2);
EXPECT_EQ(GetInputNode(*node3, graph, 0), node1);
EXPECT_EQ(GetInputNode(*node3, graph, 2), nullptr);
EXPECT_EQ(GetInputNode(*node1, graph), nullptr);
}
TEST(GraphUtilsTest, EnsureNodeNamesUnique) {
Graph g(OpRegistry::Global());
Node *const_0, *const_1, *const_2;
Tensor tensor(DT_INT32, {});
tensor.scalar<int32>()() = 5;
for (auto node : {&const_0, &const_1}) {
TF_EXPECT_OK(NodeBuilder("Const", "Const")
.Attr("value", tensor)
.Attr("dtype", DT_INT32)
.Finalize(&g, node));
}
TF_EXPECT_OK(NodeBuilder("Const_1", "Const")
.Attr("value", tensor)
.Attr("dtype", DT_INT32)
.Finalize(&g, &const_2));
TF_EXPECT_OK(EnsureNodeNamesUnique(&g));
EXPECT_NE(const_0->name(), const_1->name());
EXPECT_NE(const_1->name(), const_2->name());
EXPECT_NE(const_0->name(), const_2->name());
}
TEST(GraphUtilsTest, TestGetFetchNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef* node1 = AddNode("node1", "Identity", {}, {}, &graph);
NodeDef* node2 = AddNode("node2", "Identity", {node1->name()}, {}, &graph);
NodeDef* node3 = AddNode("node3", "Identity", {node2->name()}, {}, &graph);
item.fetch.push_back(node3->name());
NodeDef* sink_node;
TF_EXPECT_OK(GetFetchNode(graph, item, &sink_node));
EXPECT_EQ(sink_node->name(), node3->name());
}
TEST(GraphUtilsTest, TestFindSinkNodeMultipleFetches) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef* node1 = AddNode("node1", "Identity", {}, {}, &graph);
NodeDef* node2 = AddNode("node2", "Identity", {node1->name()}, {}, &graph);
NodeDef* node3 = AddNode("node3", "Identity", {node2->name()}, {}, &graph);
item.fetch.push_back(node2->name());
item.fetch.push_back(node3->name());
NodeDef* sink_node;
Status s = GetFetchNode(graph, item, &sink_node);
EXPECT_FALSE(s.ok());
}
TEST(GraphUtilsTest, TestFindSinkNodeNoFetches) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef* node1 = AddNode("node1", "Identity", {}, {}, &graph);
NodeDef* node2 = AddNode("node2", "Identity", {node1->name()}, {}, &graph);
AddNode("node3", "Identity", {node2->name()}, {}, &graph);
NodeDef* sink_node;
Status s = GetFetchNode(graph, item, &sink_node);
EXPECT_FALSE(s.ok());
}
TEST(GraphUtilsTest, TestCopyShapesAndTypesAttrsNoShapes) {
NodeDef from = NDef("range", "RangeDataset", {},
{{kOutputTypes, absl::Span<const DataType>{}}});
NodeDef to_node;
EXPECT_FALSE(CopyShapesAndTypesAttrs(from, &to_node));
}
TEST(GraphUtilsTest, TestCopyShapesAndTypesAttrsNoTypes) {
NodeDef from = NDef("range", "RangeDataset", {},
{{kOutputShapes, absl::Span<const TensorShape>{}}});
NodeDef to_node;
EXPECT_FALSE(CopyShapesAndTypesAttrs(from, &to_node));
}
TEST(GraphUtilsTest, TestCopyShapesAndTypesAttrsOutputTypes) {
NodeDef from = NDef("range", "RangeDataset", {},
{{kOutputShapes, 666}, {kOutputTypes, 888}});
NodeDef to_node;
EXPECT_TRUE(CopyShapesAndTypesAttrs(from, &to_node));
EXPECT_EQ(to_node.attr().at(kOutputShapes).i(), 666);
EXPECT_EQ(to_node.attr().at(kOutputTypes).i(), 888);
}
TEST(GraphUtilsTest, TestCopyShapesAndTypesAttrsToutputTypes) {
NodeDef from = NDef("tensor", "TensorDataset", {},
{{kOutputShapes, 666}, {kToutputTypes, 888}});
NodeDef to_node;
EXPECT_TRUE(CopyShapesAndTypesAttrs(from, &to_node));
EXPECT_EQ(to_node.attr().at(kOutputShapes).i(), 666);
EXPECT_EQ(to_node.attr().at(kOutputTypes).i(), 888);
}
TEST(GraphUtilsTest, TestSetMetadataName) {
NodeDef node = NDef("range", "RangeDataset", {},
{{kOutputShapes, 666}, {kOutputTypes, 888}});
EXPECT_TRUE(SetMetadataName("metadata_name", &node).ok());
EXPECT_TRUE(node.attr().contains("metadata"));
data::Metadata metadata;
metadata.ParseFromString(node.attr().at("metadata").s());
EXPECT_EQ("metadata_name", metadata.name());
EXPECT_FALSE(SetMetadataName("new_metadata_name", &node).ok());
}
}
}
}
} |
1,409 | cpp | tensorflow/tensorflow | inject_prefetch | tensorflow/core/grappler/optimizers/data/inject_prefetch.cc | tensorflow/core/grappler/optimizers/data/inject_prefetch_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_INJECT_PREFETCH_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_INJECT_PREFETCH_H_
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
constexpr char kAutotune[] = "autotune";
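// A tf.data Grappler pass that, when autotuning is enabled, appends a
// PrefetchDataset with an AUTOTUNE buffer size after the last transformation
// of the input pipeline, unless that transformation is already asynchronous.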
class InjectPrefetch : public TFDataOptimizerBase {
public:
InjectPrefetch() = default;
~InjectPrefetch() override = default;
std::string name() const override { return "inject_prefetch"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
if (!config) return absl::OkStatus();
const std::string& autotune = config->parameter_map().at(kAutotune).s();
if (autotune == "true") {
autotune_ = true;
} else if (autotune == "false") {
autotune_ = false;
} else {
return errors::InvalidArgument("Received an invalid value for parameter ",
kAutotune, ": ", autotune);
}
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
protected:
bool autotune_ = true;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/inject_prefetch.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kPrefetchDataset[] = "PrefetchDataset";
constexpr std::array<const char*, 5> kAsyncTransforms = {
"MapAndBatchDataset", "ParallelBatchDataset", "ParallelInterleaveDataset",
"ParallelMapDataset", "PrefetchDataset"};
constexpr std::array<const char*, 8> kDatasetsToSkip = {
"AssertNextDataset",
"ExperimentalAssertNextDataset",
"IgnoreErrorsDataset",
"OptionsDataset",
"ModelDataset",
"OptimizeDataset",
"MaxIntraOpParallelismDataset",
"PrivateThreadPoolDataset",
};
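// Walks past pass-through datasets (Options, Model, Optimize, ...) to find the
// last real transformation; returns false if no dataset node is found or if
// that transformation is already asynchronous.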
bool ShouldInjectPrefetch(const NodeDef* last_node,
const MutableGraphView& graph) {
while (last_node != nullptr &&
absl::c_any_of(kDatasetsToSkip, [last_node](const char* dataset) {
return data::MatchesAnyVersion(dataset, last_node->op());
})) {
last_node = graph_utils::GetInputNode(*last_node, graph);
}
if (last_node == nullptr) {
VLOG(1) << "The optimization inject_prefetch is not applied because graph "
"rewrite failed to find a dataset node.";
return false;
}
if (absl::c_any_of(kAsyncTransforms, [last_node](const char* dataset) {
return data::MatchesAnyVersion(dataset, last_node->op());
})) {
VLOG(1) << "The optimization inject_prefetch is not applied because the "
"last transformation of the input pipeline is an asynchronous "
"transformation: "
<< last_node->op();
return false;
}
return true;
}
}
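// Inserts a PrefetchDataset node named "inject/prefetch_<last_node>" between
// the last dataset node and the fetch node, wiring its buffer_size input to a
// newly added AUTOTUNE scalar constant.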
Status InjectPrefetch::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization inject_prefetch is not applied if autotune is "
"off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph)) {
return absl::OkStatus();
}
if (item.fetch.size() != 1) {
return errors::InvalidArgument(
"Expected only one fetch node but there were ", item.fetch.size(), ": ",
absl::StrJoin(item.fetch, ", "));
}
NodeDef* sink_node = graph.GetNode(item.fetch.at(0));
NodeDef* last_node = graph_utils::GetInputNode(*sink_node, graph);
if (!ShouldInjectPrefetch(last_node, graph)) {
return absl::OkStatus();
}
NodeDef prefetch_node;
graph_utils::SetUniqueGraphNodeName(
strings::StrCat("inject/prefetch_", last_node->name()), graph.graph(),
&prefetch_node);
prefetch_node.set_op(kPrefetchDataset);
*prefetch_node.mutable_input()->Add() = last_node->name();
NodeDef* autotune_value =
graph_utils::AddScalarConstNode(data::model::kAutotune, &graph);
*prefetch_node.mutable_input()->Add() = autotune_value->name();
if (!graph_utils::CopyShapesAndTypesAttrs(*last_node, &prefetch_node))
return absl::OkStatus();
TF_RETURN_IF_ERROR(
graph_utils::SetMetadataName(prefetch_node.name(), &prefetch_node));
auto* added_node = graph.AddNode(std::move(prefetch_node));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(last_node->name(), added_node->name()));
stats->num_changes++;
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(InjectPrefetch, "inject_prefetch");
}
} | #include "tensorflow/core/grappler/optimizers/data/inject_prefetch.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::NDef;
constexpr char kOptionsDataset[] = "OptionsDataset";
constexpr char kParallelMapDataset[] = "ParallelMapDatasetV2";
constexpr char kPrefetchDataset[] = "PrefetchDataset";
Status Optimize(InjectPrefetch &optimizer, const GrapplerItem &item,
GraphDef *output, bool autotune) {
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
Status OptimizeWithInjectPrefetch(const GrapplerItem &item, GraphDef *output,
bool autotune) {
InjectPrefetch optimizer;
return Optimize(optimizer, item, output, autotune);
}
class InjectPrefetchParameterizedTest : public ::testing::TestWithParam<bool> {
};
TEST_P(InjectPrefetchParameterizedTest, TestAutotuneSetting) {
const bool autotune = GetParam();
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", "Identity", {"range"}, {})});
item.fetch.push_back("Sink");
GraphDef inject_prefetch_output;
TF_ASSERT_OK(
OptimizeWithInjectPrefetch(item, &inject_prefetch_output, autotune));
EXPECT_EQ(autotune, graph_utils::ContainsNodeWithOp(kPrefetchDataset,
inject_prefetch_output));
EXPECT_EQ(autotune, graph_utils::ContainsGraphNodeWithName(
"inject/prefetch_range", inject_prefetch_output));
}
INSTANTIATE_TEST_SUITE_P(AutotuneSetting, InjectPrefetchParameterizedTest,
::testing::Values(false, true));
TEST(InjectPrefetchTest, FromFunctionDef) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", "_Retval", {"range"}, {})});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectPrefetch(item, &output, true));
EXPECT_FALSE(graph_utils::ContainsNodeWithOp(kPrefetchDataset, output));
}
TEST(InjectPrefetchTest, AlreadyPrefetched) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("prefetch", kPrefetchDataset, {"range"}, {}),
NDef("Sink", "Identity", {"prefetch"}, {})});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectPrefetch(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp(kPrefetchDataset, output));
EXPECT_EQ(6, output.node_size());
}
TEST(InjectPrefetchTest, AlreadyParallelMap) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("parallel_map", kParallelMapDataset, {"range"},
{{"f", "__inference_Dataset_map_normalize_8232"},
{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", "Identity", {"parallel_map"}, {})});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectPrefetch(item, &output, true));
EXPECT_FALSE(graph_utils::ContainsNodeWithOp(kPrefetchDataset, output));
EXPECT_EQ(6, output.node_size());
}
TEST(InjectPrefetchTest, OptionsFollowedByPrefetched) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("prefetch", kPrefetchDataset, {"range"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("options", kOptionsDataset, {"prefetch"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", "Identity", {"options"}, {})});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectPrefetch(item, &output, true));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("inject/prefetch_options",
output));
EXPECT_EQ(7, output.node_size());
}
}
}
} |
1,410 | cpp | tensorflow/tensorflow | map_and_batch_fusion | tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.cc | tensorflow/core/grappler/optimizers/data/map_and_batch_fusion_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAP_AND_BATCH_FUSION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAP_AND_BATCH_FUSION_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
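// Fuses a MapDataset or ParallelMapDataset followed by a BatchDataset into a
// single MapAndBatchDataset node.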
class MapAndBatchFusion : public TFDataOptimizerBase {
public:
MapAndBatchFusion() = default;
~MapAndBatchFusion() override = default;
string name() const override { return "map_and_batch_fusion"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kFusedOpName[] = "MapAndBatchDataset";
constexpr char kParallelMap[] = "ParallelMapDataset";
constexpr char kParallelMapV2[] = "ParallelMapDatasetV2";
bool IsParallelMap(const NodeDef& node) {
return node.op() == kParallelMap || node.op() == kParallelMapV2;
}
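// Builds the fused MapAndBatchDataset node. Its inputs are the map's input
// dataset, the map's captured arguments, the batch size, num_parallel_calls
// (taken from the parallel map, converted to int64 for the V1 op, or
// defaulting to 1), and drop_remainder (taken from BatchDatasetV2 or
// defaulting to false).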
NodeDef MakeMapAndBatchNode(const NodeDef& map_node, const NodeDef& batch_node,
MutableGraphView* graph) {
NodeDef new_node;
new_node.set_op(kFusedOpName);
graph_utils::SetUniqueGraphNodeName(kFusedOpName, graph->graph(), &new_node);
new_node.add_input(map_node.input(0));
int num_other_args;
if (IsParallelMap(map_node)) {
num_other_args = map_node.input_size() - 2;
} else {
num_other_args = map_node.input_size() - 1;
}
for (int i = 0; i < num_other_args; i++) {
new_node.add_input(map_node.input(i + 1));
}
new_node.add_input(batch_node.input(1));
if (map_node.op() == kParallelMap) {
NodeDef* v = graph->GetNode(map_node.input(map_node.input_size() - 1));
NodeDef* tmp = graph_utils::AddScalarConstNode<int64_t>(
v->attr().at("value").tensor().int_val(0), graph);
new_node.add_input(tmp->name());
} else if (map_node.op() == kParallelMapV2) {
new_node.add_input(map_node.input(map_node.input_size() - 1));
} else {
NodeDef* tmp = graph_utils::AddScalarConstNode<int64_t>(1, graph);
new_node.add_input(tmp->name());
}
if (batch_node.op() == "BatchDatasetV2") {
new_node.add_input(batch_node.input(2));
} else {
NodeDef* tmp = graph_utils::AddScalarConstNode<bool>(false, graph);
new_node.add_input(tmp->name());
}
for (auto key : {"f", "Targuments"}) {
graph_utils::CopyAttribute(key, map_node, &new_node);
}
graph_utils::CopyShapesAndTypesAttrs(batch_node, &new_node);
for (auto key : {"preserve_cardinality"}) {
if (gtl::FindOrNull(map_node.attr(), key)) {
graph_utils::CopyAttribute(key, map_node, &new_node);
}
}
graph_utils::MaybeSetFusedMetadata(map_node, batch_node, &new_node);
return new_node;
}
}
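// Finds BatchDataset/BatchDatasetV2 nodes whose input is a MapDataset or
// ParallelMapDataset and replaces each such pair with a single fused
// MapAndBatchDataset node.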
Status MapAndBatchFusion::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
for (const NodeDef& node : item.graph.node()) {
if (node.op() != "BatchDataset" && node.op() != "BatchDatasetV2") {
continue;
}
const NodeDef& batch_node = node;
NodeDef* node2 = graph_utils::GetInputNode(batch_node, graph);
if (node2->op() != "MapDataset" && !IsParallelMap(*node2)) {
continue;
}
NodeDef* map_node = node2;
auto* new_node =
graph.AddNode(MakeMapAndBatchNode(*map_node, batch_node, &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(batch_node.name(), new_node->name()));
nodes_to_delete.insert(map_node->name());
nodes_to_delete.insert(batch_node.name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MapAndBatchFusion, "map_and_batch_fusion");
}
} | #include "tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(MapAndBatchFusionTest, FuseMapAndBatchNodesIntoOne) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(2);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(2);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
map_node =
graph_utils::AddNode("", "MapDataset", map_inputs, map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(2);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDataset", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(map_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(batch_node->name(), output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
NodeDef map_and_batch_node = output.node(
graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output));
EXPECT_EQ(map_and_batch_node.input_size(), 5);
EXPECT_EQ(map_and_batch_node.input(0), map_node->input(0));
EXPECT_EQ(map_and_batch_node.input(1), map_node->input(1));
EXPECT_EQ(map_and_batch_node.input(2), batch_node->input(1));
NodeDef num_parallel_calls_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(3), output));
EXPECT_EQ(num_parallel_calls_node.attr().at("value").tensor().int64_val(0),
1);
NodeDef drop_remainder_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(4), output));
EXPECT_EQ(drop_remainder_node.attr().at("value").tensor().bool_val(0), false);
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("f"),
map_node->attr().at("f")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("Targuments"),
map_node->attr().at("Targuments")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_shapes"),
batch_node->attr().at("output_shapes")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_types"),
batch_node->attr().at("output_types")));
}
TEST(MapAndBatchFusionTest, FuseMapAndBatchV2NodesIntoOne) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(2);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(2);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
map_node =
graph_utils::AddNode("", "MapDataset", map_inputs, map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *drop_remainder_node =
graph_utils::AddScalarConstNode<bool>(true, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(3);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
batch_inputs[2] = drop_remainder_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDatasetV2", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(map_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(batch_node->name(), output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
NodeDef map_and_batch_node = output.node(
graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output));
EXPECT_EQ(map_and_batch_node.input_size(), 5);
EXPECT_EQ(map_and_batch_node.input(0), map_node->input(0));
EXPECT_EQ(map_and_batch_node.input(1), map_node->input(1));
EXPECT_EQ(map_and_batch_node.input(2), batch_node->input(1));
NodeDef num_parallel_calls_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(3), output));
EXPECT_EQ(num_parallel_calls_node.attr().at("value").tensor().int64_val(0),
1);
EXPECT_EQ(map_and_batch_node.input(4), batch_node->input(2));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("f"),
map_node->attr().at("f")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("Targuments"),
map_node->attr().at("Targuments")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_shapes"),
batch_node->attr().at("output_shapes")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_types"),
batch_node->attr().at("output_types")));
}
TEST(MapAndBatchFusionTest, FuseParallelMapAndBatchNodesIntoOne) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *num_parallel_calls_node =
graph_utils::AddScalarConstNode<int>(2, &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(3);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
map_inputs[2] = num_parallel_calls_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(2);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
map_node = graph_utils::AddNode("", "ParallelMapDataset", map_inputs,
map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(2);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDataset", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(map_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(batch_node->name(), output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
NodeDef map_and_batch_node = output.node(
graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output));
EXPECT_EQ(map_and_batch_node.input_size(), 5);
EXPECT_EQ(map_and_batch_node.input(0), map_node->input(0));
EXPECT_EQ(map_and_batch_node.input(1), map_node->input(1));
EXPECT_EQ(map_and_batch_node.input(2), batch_node->input(1));
NodeDef num_parallel_calls_node2 = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(3), output));
EXPECT_EQ(num_parallel_calls_node2.attr().at("value").tensor().int64_val(0),
2);
NodeDef drop_remainder_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(4), output));
EXPECT_EQ(drop_remainder_node.attr().at("value").tensor().bool_val(0), false);
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("f"),
map_node->attr().at("f")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("Targuments"),
map_node->attr().at("Targuments")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_shapes"),
batch_node->attr().at("output_shapes")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_types"),
batch_node->attr().at("output_types")));
}
TEST(MapAndBatchFusionTest, FuseParallelMapV2AndBatchNodesIntoOne) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *num_parallel_calls_node =
graph_utils::AddScalarConstNode<int64_t>(2, &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(3);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
map_inputs[2] = num_parallel_calls_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(2);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
map_node = graph_utils::AddNode("", "ParallelMapDatasetV2", map_inputs,
map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(2);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDataset", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(map_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(batch_node->name(), output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
NodeDef map_and_batch_node = output.node(
graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output));
EXPECT_EQ(map_and_batch_node.input_size(), 5);
EXPECT_EQ(map_and_batch_node.input(0), map_node->input(0));
EXPECT_EQ(map_and_batch_node.input(1), map_node->input(1));
EXPECT_EQ(map_and_batch_node.input(2), batch_node->input(1));
NodeDef num_parallel_calls_node2 = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(3), output));
EXPECT_EQ(num_parallel_calls_node2.attr().at("value").tensor().int64_val(0),
2);
NodeDef drop_remainder_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(4), output));
EXPECT_EQ(drop_remainder_node.attr().at("value").tensor().bool_val(0), false);
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("f"),
map_node->attr().at("f")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("Targuments"),
map_node->attr().at("Targuments")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_shapes"),
batch_node->attr().at("output_shapes")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_types"),
batch_node->attr().at("output_types")));
}
TEST(MapAndBatchFusionTest, NoChange) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
std::vector<string> batch_inputs(2);
batch_inputs[0] = range_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
graph_utils::AddNode("", "BatchDataset", batch_inputs, batch_attrs, &graph);
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::Compare(*graph.graph(), output));
}
}
}
} |
1,411 | cpp | tensorflow/tensorflow | remove_compression_map | tensorflow/core/grappler/optimizers/data/remove_compression_map.cc | tensorflow/core/grappler/optimizers/data/remove_compression_map_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_REMOVE_COMPRESSION_MAP_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_REMOVE_COMPRESSION_MAP_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
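// Removes a ParallelMap that applies an element-compression function
// (CompressElement), reconnecting its consumers directly to the map's input.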
class RemoveCompressionMap : public TFDataOptimizerBase {
public:
RemoveCompressionMap() = default;
~RemoveCompressionMap() override = default;
string name() const override { return "remove_compression_map"; }
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/remove_compression_map.h"
#include <string>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace grappler {
namespace {
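// Returns the signature name of the first function in the library whose body
// contains a CompressElement op.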
absl::StatusOr<std::string> GetCompressionFunctionName(const GraphDef& graph) {
for (const auto& function : graph.library().function()) {
for (const auto& node : function.node_def()) {
if (node.op() == "CompressElement") {
return function.signature().name();
}
}
}
return errors::Internal("Compression function not found.");
}
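// Finds the ParallelMapDatasetV2 node whose mapped function is the
// compression function.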
absl::StatusOr<NodeDef> GetCompressionMapNode(const GraphDef& graph) {
TF_ASSIGN_OR_RETURN(std::string compression_function_name,
GetCompressionFunctionName(graph));
for (const auto& node : graph.node()) {
if (node.op() != "ParallelMapDatasetV2") {
continue;
}
if (auto it = node.attr().find("f");
it != node.attr().end() && it->second.has_func() &&
it->second.func().name() == compression_function_name) {
return node;
}
}
return errors::Internal("Compression map node not found.");
}
}
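// Rewires every consumer of the compression map node to read directly from
// that map's input dataset, effectively bypassing the compression map.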
Status RemoveCompressionMap::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
TF_ASSIGN_OR_RETURN(NodeDef compression_map_node,
GetCompressionMapNode(*output));
MutableGraphView graph(output);
for (const auto& compression_map_output :
graph.GetFanout(graph.GetOutputPort(compression_map_node.name(), 0))) {
compression_map_output.node->clear_input();
compression_map_output.node->add_input(compression_map_node.input().Get(0));
++stats->num_changes;
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(RemoveCompressionMap, "remove_compression_map");
}
} | #include "tensorflow/core/grappler/optimizers/data/remove_compression_map.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::testing::HasSubstr;
TEST(RemoveCompressionMap, Success) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("Const/_0",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 0}}),
NDef("Const/_1",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 10}}),
NDef("Const/_2",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 1}}),
NDef("RangeDataset/_3",
"RangeDataset",
{"Const/_0",
"Const/_1",
"Const/_2"},
{}),
NDef("Const/_4",
"Const",
{},
{{"dtype", DT_INT64},
{"value", -1}}),
graph_tests_utils::MakeParallelMapV2Node(
"ParallelMapDatasetV2/_5",
"RangeDataset/_3",
"Const/_4",
"__inference_Dataset_map_lambda_10",
"default"),
NDef("dataset",
"_Retval",
{"ParallelMapDatasetV2/_5"},
{{"T", DT_VARIANT}}),
NDef("Sink",
"Identity",
{"ParallelMapDatasetV2/_5"},
{{"T", DT_VARIANT}})},
{FunctionDefHelper::Create(
"__inference_Dataset_map_lambda_10",
{"args_0: int64"},
{"identity: variant"},
{},
{
{{"CompressElement"},
"CompressElement",
{"args_0"},
{{"input_types", DT_INT64}}},
{{"Identity"},
"Identity",
{"CompressElement:compressed:0"},
{{"T", DT_VARIANT}}},
},
{})});
RemoveCompressionMap optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("dataset", output);
EXPECT_EQ(output.node(index).input(0), "RangeDataset/_3");
}
TEST(RemoveCompressionMap, FailureNoMap) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef({NDef("Const/_0",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 0}}),
NDef("Const/_1",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 10}}),
NDef("Const/_2",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 1}}),
NDef("RangeDataset/_3",
"RangeDataset",
{"Const/_0",
"Const/_1",
"Const/_2"},
{}),
NDef("dataset",
"_Retval",
{"RangeDataset/_3"},
{{"T", DT_VARIANT}}),
NDef("Sink",
"Identity",
{"RangeDataset/_3"},
{{"T", DT_VARIANT}})});
RemoveCompressionMap optimizer;
GraphDef output;
ASSERT_THAT(optimizer.Optimize(nullptr, item, &output),
testing::StatusIs(error::INTERNAL,
HasSubstr("Compression function not found.")));
}
}
}
} |
1,412 | cpp | tensorflow/tensorflow | map_and_filter_fusion | tensorflow/core/grappler/optimizers/data/map_and_filter_fusion.cc | tensorflow/core/grappler/optimizers/data/map_and_filter_fusion_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAP_AND_FILTER_FUSION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAP_AND_FILTER_FUSION_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
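// Fuses a map followed by a filter by pushing the predicate computation into
// the map function, filtering on the resulting boolean, and then dropping it.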
class MapAndFilterFusion : public TFDataOptimizerBase {
public:
MapAndFilterFusion() = default;
~MapAndFilterFusion() override = default;
string name() const override { return "map_and_filter_fusion"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/map_and_filter_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/kernels/function_ops.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
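// Builds a map node (same op as the original map) that runs the fused
// map-and-filter function. The fused function emits the original map outputs
// plus a trailing DT_BOOL predicate value, so an extra type and scalar shape
// are appended to output_types/output_shapes.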
NodeDef MakeFusedNode(const NodeDef& map_node, const NodeDef& filter_node,
const FunctionDef& fused_function,
MutableGraphView* graph) {
NodeDef fused_node;
graph_utils::SetUniqueGraphNodeName("fused_map", graph->graph(), &fused_node);
fused_node.set_op(map_node.op());
for (int i = 0; i < map_node.input_size(); ++i) {
fused_node.add_input(map_node.input(i));
}
auto attr = map_node.attr().at("f");
attr.mutable_func()->set_name(fused_function.signature().name());
(*fused_node.mutable_attr())["f"] = std::move(attr);
graph_utils::CopyAttribute("Targuments", map_node, &fused_node);
graph_utils::CopyShapesAndTypesAttrs(map_node, &fused_node);
for (auto key :
{"use_inter_op_parallelism", "sloppy", "preserve_cardinality"}) {
if (gtl::FindOrNull(map_node.attr(), key)) {
graph_utils::CopyAttribute(key, map_node, &fused_node);
}
}
graph_utils::MaybeSetFusedMetadata(map_node, filter_node, &fused_node);
(*fused_node.mutable_attr())["output_types"]
.mutable_list()
->mutable_type()
->Add(DT_BOOL);
(*fused_node.mutable_attr())["output_shapes"]
.mutable_list()
->mutable_shape()
->Add();
return fused_node;
}
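// Builds a FilterDataset that keeps elements whose trailing boolean (the
// predicate computed inside the fused map) is true, using a generated
// "GetLast" function that returns the last component.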
NodeDef MakeFilterNode(const NodeDef& fused_map,
const FunctionDef& fused_map_func,
MutableGraphView* graph, FunctionDefLibrary* library) {
NodeDef filter_node;
graph_utils::SetUniqueGraphNodeName("FilterByLast", graph->graph(),
&filter_node);
filter_node.set_op("FilterDataset");
filter_node.add_input(fused_map.name());
graph_utils::CopyShapesAndTypesAttrs(fused_map, &filter_node);
AddNodeAttr("Targuments", std::vector<DataType>({}), &filter_node);
OpDef fused_sig = fused_map_func.signature();
FunctionDef* func = library->add_function();
OpDef* sig = func->mutable_signature();
sig->set_name("GetLast");
for (const auto& arg : fused_sig.output_arg()) {
*(sig->add_input_arg()) = arg;
}
OpDef::ArgDef* arg = sig->add_output_arg();
arg->set_name("predicate_result");
arg->set_description("predicate result computed in the fused map");
arg->set_type(DT_BOOL);
sig->set_description("returns the last argument");
(*func->mutable_ret())["predicate_result"] = strings::StrCat(
fused_sig.output_arg(fused_sig.output_arg_size() - 1).name(), ":0");
(*filter_node.mutable_attr())["predicate"] =
FunctionDefHelper::FunctionRef(func->signature().name()).proto;
return filter_node;
}
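// Builds a MapDataset that drops the trailing boolean again via a generated
// "DropLast" function, restoring the original output signature.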
NodeDef MakeMapNode(const NodeDef& updated_filter, const NodeDef& original_map,
const FunctionDef& fused_map_func, MutableGraphView* graph,
FunctionDefLibrary* library) {
NodeDef map_node;
graph_utils::SetUniqueGraphNodeName("DropLast", graph->graph(), &map_node);
map_node.set_op("MapDataset");
map_node.add_input(updated_filter.name());
graph_utils::CopyShapesAndTypesAttrs(original_map, &map_node);
AddNodeAttr("Targuments", std::vector<DataType>({}), &map_node);
for (auto key : {"use_inter_op_parallelism", "preserve_cardinality"}) {
if (gtl::FindOrNull(original_map.attr(), key)) {
graph_utils::CopyAttribute(key, original_map, &map_node);
}
}
OpDef fused_sig = fused_map_func.signature();
FunctionDef* func = library->add_function();
OpDef* sig = func->mutable_signature();
sig->set_name("DropLast");
for (const auto& o : fused_sig.output_arg()) {
*(sig->add_input_arg()) = o;
}
for (int i = 0; i < fused_sig.output_arg_size() - 1; ++i) {
auto arg_i = fused_sig.output_arg(i);
*(sig->add_output_arg()) = arg_i;
(*func->mutable_ret())[arg_i.name()] = strings::StrCat(arg_i.name(), ":0");
}
sig->set_description("drops the last argument");
(*map_node.mutable_attr())["f"] =
FunctionDefHelper::FunctionRef(func->signature().name()).proto;
return map_node;
}
}
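// For every FilterDataset directly preceded by a (Parallel)MapDataset with
// composable signatures, rewrites the pair into
// fused map -> FilterByLast -> DropLast and deletes the original nodes.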
Status MapAndFilterFusion::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
GraphDef sorted_old_graph = item.graph;
TF_RETURN_IF_ERROR(TopologicalSort(&sorted_old_graph));
*output = sorted_old_graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
auto get_map_node = [](const NodeDef& node) -> const NodeDef* {
if ((node.op() == "MapDataset" && node.input_size() == 1) ||
(node.op() == "ParallelMapDataset" && node.input_size() == 2)) {
return &node;
}
return nullptr;
};
auto get_filter_node = [](const NodeDef& node) -> const NodeDef* {
if (node.op() == "FilterDataset" && node.input_size() == 1) return &node;
return nullptr;
};
auto make_fused_function = [&function_library, &output](
const NodeDef* map_node,
const NodeDef* filter_node) -> FunctionDef* {
const auto& parent_fun = map_node->attr().at("f");
const FunctionDef* map_func =
function_library.Find(parent_fun.func().name());
const auto& fun = filter_node->attr().at("predicate");
const FunctionDef* filter_func = function_library.Find(fun.func().name());
if (!fusion_utils::CanCompose(map_func->signature(),
filter_func->signature())) {
VLOG(1) << "Can't fuse map and filter because the output signature of "
"the map function does not match the input signature of the "
"filter function\n";
return nullptr;
}
return fusion_utils::FuseFunctions(
*map_func, *filter_func, "fused_map_and_filter_function",
fusion_utils::CombineSignature, fusion_utils::ComposeInput,
fusion_utils::CombineOutput, fusion_utils::MergeNodes,
output->mutable_library());
};
for (const NodeDef& node : sorted_old_graph.node()) {
const NodeDef* filter_node = get_filter_node(node);
if (!filter_node) continue;
const NodeDef* map_node =
get_map_node(*graph_utils::GetInputNode(*filter_node, graph));
if (!map_node) continue;
const auto* fused_function = make_fused_function(map_node, filter_node);
if (fused_function == nullptr) continue;
const auto* fused_maps = graph.AddNode(
MakeFusedNode(*map_node, *filter_node, *fused_function, &graph));
const auto* new_filter_node = graph.AddNode(MakeFilterNode(
*fused_maps, *fused_function, &graph, output->mutable_library()));
const auto* new_map_node =
graph.AddNode(MakeMapNode(*new_filter_node, *map_node, *fused_function,
&graph, output->mutable_library()));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(filter_node->name(), new_map_node->name()));
TF_RETURN_IF_ERROR(function_library.AddFunctionDef(*fused_function));
nodes_to_delete.insert(map_node->name());
nodes_to_delete.insert(filter_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MapAndFilterFusion, "map_and_filter_fusion");
}
} | #include "tensorflow/core/grappler/optimizers/data/map_and_filter_fusion.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using graph_tests_utils::MakeFilterNode;
using graph_tests_utils::MakeMapNode;
using graph_tests_utils::MakeParallelMapNode;
TEST(MapAndFilterFusionTest, FuseMapAndFilter) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map", "range"), MakeFilterNode("filter", "map")},
{
test::function::XTimesTwo(),
test::function::IsZero(),
});
MapAndFilterFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter", output));
EXPECT_EQ(graph_utils::FindAllGraphNodesWithOp("MapDataset", output).size(),
2);
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
}
TEST(MapAndFilterFusionTest, FuseParallelMapAndFilter) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 3}, {"dtype", "DT_INT32"}}),
MakeParallelMapNode("map", "range", "num_parallel_calls", "XTimesTwo",
false),
MakeFilterNode("filter", "map")},
{
test::function::XTimesTwo(),
test::function::IsZero(),
});
MapAndFilterFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter", output));
ASSERT_TRUE(graph_utils::ContainsNodeWithOp("ParallelMapDataset", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapDataset", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
auto& map_node = output.node(
graph_utils::FindGraphNodeWithOp("ParallelMapDataset", output));
EXPECT_FALSE(map_node.attr().at("sloppy").b()) << map_node.DebugString();
EXPECT_EQ(map_node.input_size(), 2);
}
TEST(MapAndFilterFusionTest, FuseMapAndFilterWithExtraChild) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map", "range"), MakeFilterNode("filter", "map"),
NDef("cache", "CacheDataset", {"filter", "filename"}, {})},
{
test::function::XTimesTwo(),
test::function::IsZero(),
});
MapAndFilterFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter", output));
EXPECT_EQ(graph_utils::FindAllGraphNodesWithOp("MapDataset", output).size(),
2);
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("CacheDataset", output));
}
TEST(MapAndFilterFusionTest, FuseParallelMapAndFilterWithExtraChild) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 3}, {"dtype", "DT_INT32"}}),
MakeParallelMapNode("map", "range", "num_parallel_calls", "XTimesTwo",
true),
MakeFilterNode("filter", "map"),
NDef("cache", "CacheDataset", {"filter", "filename"}, {})},
{
test::function::XTimesTwo(),
test::function::IsZero(),
});
MapAndFilterFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
ASSERT_TRUE(graph_utils::ContainsNodeWithOp("ParallelMapDataset", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("CacheDataset", output));
auto& map_node = output.node(
graph_utils::FindGraphNodeWithOp("ParallelMapDataset", output));
EXPECT_TRUE(map_node.attr().at("sloppy").b()) << map_node.DebugString();
EXPECT_EQ(map_node.input_size(), 2);
}
}
}
} |
1,413 | cpp | tensorflow/tensorflow | make_deterministic | tensorflow/core/grappler/optimizers/data/make_deterministic.cc | tensorflow/core/grappler/optimizers/data/make_deterministic_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAKE_DETERMINISTIC_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAKE_DETERMINISTIC_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
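// Rewrites tf.data pipelines to remove sources of nondeterminism: parallel
// maps and interleaves whose functions contain unsafe stateful ops are
// converted to their sequential counterparts or split so that the stateful
// portion runs without parallelism.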
class MakeDeterministic : public TFDataOptimizerBase {
public:
MakeDeterministic() = default;
~MakeDeterministic() override = default;
string name() const override { return "make_deterministic"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/make_deterministic.h"
#include <algorithm>
#include <array>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/optimizers/data/split_utils.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kInterleaveOp[] = "InterleaveDataset";
constexpr char kParallelInterleaveOp[] = "ParallelInterleaveDataset";
constexpr char kLegacyParallelInterleaveOp[] =
"LegacyParallelInterleaveDatasetV2";
constexpr char kMapOp[] = "MapDataset";
constexpr char kParallelMapOp[] = "ParallelMapDataset";
constexpr char kParallelMapOpV2[] = "ParallelMapDatasetV2";
constexpr char kMapAndBatchOp[] = "MapAndBatchDataset";
constexpr char kBatchOp[] = "BatchDataset";
constexpr char kBatchV2Op[] = "BatchDatasetV2";
constexpr char kParallelBatchOp[] = "ParallelBatchDataset";
constexpr char kPrefetchOp[] = "PrefetchDataset";
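// Stateful ops in the first list stay deterministic even when run in
// parallel; the second list adds ops (mostly random ops) that are still
// deterministic when merely run asynchronously but not when run in parallel.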
constexpr std::array<const char*, 9> kDeterministicStatefulOps = {
"TextLineDataset", "FixedLengthRecordDataset", "TFRecordDataset",
"TensorSliceDataset", "RangeDataset", "SSTableDataset", "RecordIODataset",
"Print", "Assert"};
constexpr std::array<const char*, 13> kDeterministicStatefulOpsWhenAsync = {
"RandomUniform",
"RandomUniformInt",
"RandomStandardNormal",
"ParameterizedTruncatedNormal",
"TruncatedNormal",
"RandomShuffle",
"Multinomial",
"RandomGamma",
"RandomGammaGrad",
"RandomPoisson",
"RandomCrop",
"SampleDistortedBoundingBox",
"SampleDistortedBoundingBoxV2"};
bool IsDeterministicWhenRunInParallel(const std::string& stateful_op) {
for (auto op_in_array : kDeterministicStatefulOps) {
if (data::MatchesAnyVersion(op_in_array, stateful_op)) {
return true;
}
}
return false;
}
bool IsDeterministicWhenRunAsynchronously(const std::string& stateful_op) {
for (auto op_in_array : kDeterministicStatefulOps) {
if (data::MatchesAnyVersion(op_in_array, stateful_op)) {
return true;
}
}
for (auto op_in_array : kDeterministicStatefulOpsWhenAsync) {
if (data::MatchesAnyVersion(op_in_array, stateful_op)) {
return true;
}
}
return false;
}
bool IsParallelInterleave(const std::string& op) {
return data::MatchesAnyVersion(kParallelInterleaveOp, op) ||
op == kLegacyParallelInterleaveOp;
}
bool IsParallelMap(const std::string& op) {
return data::MatchesAnyVersion(kParallelMapOp, op);
}
bool IsParallelBatch(const std::string& op) {
return data::MatchesAnyVersion(kParallelBatchOp, op);
}
bool IsMapAndBatch(const std::string& op) {
return data::MatchesAnyVersion(kMapAndBatchOp, op);
}
bool IsPrefetch(const std::string& op) {
return data::MatchesAnyVersion(kPrefetchOp, op);
}
bool IntroducesFunctionParallelism(const std::string& op) {
return IsParallelInterleave(op) || IsParallelMap(op) || IsMapAndBatch(op);
}
bool IntroducesAsynchrony(const std::string& op) {
return IntroducesFunctionParallelism(op) || IsPrefetch(op) ||
IsParallelBatch(op);
}
absl::flat_hash_map<absl::string_view, const NodeDef*> NameToNode(
const FunctionDef& function) {
absl::flat_hash_map<absl::string_view, const NodeDef*> name_to_node;
for (const NodeDef& node : function.node_def()) {
name_to_node.insert({node.name(), &node});
}
return name_to_node;
}
NodeDef* GetMutableNode(const string& node_name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(node_name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << node_name
<< " in the optimized graph.";
return graph->graph()->mutable_node(index);
}
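// Rewrites a ParallelMap or ParallelInterleave node in place into its
// nonparallel counterpart (MapDataset/InterleaveDataset). Inputs the
// nonparallel op does not take (e.g. num_parallel_calls) are converted into
// control inputs, and the "sloppy"/"deterministic" attributes are dropped.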
Status ConvertMapOrInterleave(const string& node_name,
MutableGraphView* graph) {
NodeDef* node = GetMutableNode(node_name, graph);
auto Targuments = node->attr().find("Targuments");
if (Targuments == node->attr().end()) {
return errors::Internal("Failed to find Targuments attribute for node ",
node_name);
}
int num_inputs_after_rewrite;
if (IsParallelInterleave(node->op())) {
node->set_op(kInterleaveOp);
num_inputs_after_rewrite = 3 + Targuments->second.list().type_size();
} else {
DCHECK(IsParallelMap(node->op()));
node->set_op(kMapOp);
num_inputs_after_rewrite = 1 + Targuments->second.list().type_size();
}
int inputs_processed = 0;
for (int i = 0; i < node->input_size(); i++) {
std::string input = node->input(i);
if (IsControlInput(input)) {
continue;
}
if (inputs_processed >= num_inputs_after_rewrite) {
node->set_input(i, absl::StrCat("^", input));
}
inputs_processed++;
}
if (inputs_processed < num_inputs_after_rewrite) {
return errors::Internal("Found only ", inputs_processed, " inputs to node ",
node_name, ", but expected to find at least ",
num_inputs_after_rewrite);
}
node->mutable_attr()->erase("deterministic");
node->mutable_attr()->erase("sloppy");
return absl::OkStatus();
}
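// Returns `nodes` plus every node of `function_def` they transitively depend
// on, following both data and control inputs within the function.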
absl::flat_hash_set<absl::string_view> GetAllTransitiveDependencies(
const FunctionDef& function_def,
const absl::flat_hash_set<absl::string_view>& nodes) {
std::vector<absl::string_view> nodes_to_process;
std::copy(nodes.begin(), nodes.end(), std::back_inserter(nodes_to_process));
absl::flat_hash_map<absl::string_view, const NodeDef*> name_to_node =
NameToNode(function_def);
absl::flat_hash_set<absl::string_view> dependencies;
while (!nodes_to_process.empty()) {
absl::string_view node_name = nodes_to_process.back();
nodes_to_process.pop_back();
if (dependencies.contains(node_name)) {
continue;
}
dependencies.insert(node_name);
auto iter = name_to_node.find(node_name);
if (iter == name_to_node.end()) {
continue;
}
for (absl::string_view inp : iter->second->input()) {
absl::string_view inp_node = inp.substr(0, inp.find(':'));
if (inp_node.at(0) == '^') {
inp_node = inp_node.substr(1);
}
if (name_to_node.contains(inp_node)) {
nodes_to_process.push_back(inp_node);
}
}
}
return dependencies;
}
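// Splits the function of a ParallelMapV2/MapAndBatch node in two: the
// nondeterministic nodes and their dependencies run in a new nonparallel
// MapDataset inserted upstream, while the remaining nodes keep running in the
// original (parallel) op. Both new functions are added to the graph's
// function library and fanouts are redirected to the new parallel node.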
Status SplitMap(
const FunctionLibraryDefinition& library, const string& map_node_name,
MutableGraphView* graph,
const absl::flat_hash_set<absl::string_view>& nondeterministic_nodes) {
NodeDef* map_node = GetMutableNode(map_node_name, graph);
NameAttrList func = map_node->attr().at("f").func();
const FunctionDef* function_def = library.Find(func.name());
if (!function_def) {
return errors::Internal("Could not look up function ", func.name(),
" in FunctionLibraryDefinition");
}
absl::flat_hash_set<absl::string_view> nodes_to_move =
GetAllTransitiveDependencies(*function_def, nondeterministic_nodes);
VLOG(2) << "Will move nodes to nonparallel function: "
<< absl::StrJoin(nodes_to_move, ", ");
int64_t num_captured_arguments =
map_node->attr().find("Targuments")->second.list().type_size();
TF_ASSIGN_OR_RETURN(
split_utils::SplitResults split_results,
split_utils::SplitFunction(*function_def, nodes_to_move,
num_captured_arguments, library));
if (split_results.first_function_output_types.empty()) {
return errors::Unimplemented(
"The case where the first function has no outputs is unimplemented.");
}
bool is_map_and_batch = map_node->op() == kMapAndBatchOp;
NodeDef* first_map_node_ptr;
{
NodeDef first_map_node;
graph_utils::SetUniqueGraphNodeName(
strings::StrCat("make_deterministic_sequential_map/", map_node->name()),
graph->graph(), &first_map_node);
first_map_node.set_op(kMapOp);
int num_control_deps = NumControlInputs(*map_node);
int num_extra_inputs = is_map_and_batch ? 3 : 1;
int control_deps_index = map_node->input_size() - num_control_deps;
int extra_inputs_index = control_deps_index - num_extra_inputs;
for (int i = 0; i < extra_inputs_index; i++) {
DCHECK(!IsControlInput(map_node->input(i)));
first_map_node.add_input(map_node->input(i));
}
for (int i = extra_inputs_index; i < control_deps_index; i++) {
DCHECK(!IsControlInput(map_node->input(i)));
first_map_node.add_input(absl::StrCat("^", map_node->input(i)));
}
for (int i = control_deps_index; i < map_node->input_size(); i++) {
DCHECK(IsControlInput(map_node->input(i)));
first_map_node.add_input(map_node->input(i));
}
NameAttrList* name_attr_list =
(*first_map_node.mutable_attr())["f"].mutable_func();
name_attr_list->set_name(split_results.first_function.signature().name());
graph_utils::CopyAttribute("Targuments", *map_node, &first_map_node);
for (auto key : {"use_inter_op_parallelism", "preserve_cardinality"}) {
if (gtl::FindOrNull(map_node->attr(), key)) {
graph_utils::CopyAttribute(key, *map_node, &first_map_node);
}
}
AddNodeAttr("output_types", split_results.first_function_output_types,
&first_map_node);
TensorShapeProto unknown_shape;
unknown_shape.set_unknown_rank(true);
std::vector<TensorShapeProto> output_shapes(
split_results.first_function_output_types.size(), unknown_shape);
AddNodeAttr("output_shapes", output_shapes, &first_map_node);
first_map_node_ptr = graph->AddNode(std::move(first_map_node));
}
NodeDef* second_map_node_ptr;
{
NodeDef second_map_node;
string node_name =
map_node->op() == kMapAndBatchOp ? "map_and_batch" : "parallel_map";
graph_utils::SetUniqueGraphNodeName(
strings::StrCat("make_deterministic_parallel_", node_name, "/",
map_node->name()),
graph->graph(), &second_map_node);
second_map_node.set_op(map_node->op());
second_map_node.add_input(first_map_node_ptr->name());
for (int i = 1; i < map_node->input_size(); i++) {
second_map_node.add_input(map_node->input(i));
}
NameAttrList* name_attr_list =
(*second_map_node.mutable_attr())["f"].mutable_func();
name_attr_list->set_name(split_results.second_function.signature().name());
graph_utils::CopyAttribute("Targuments", *map_node, &second_map_node);
graph_utils::CopyAttribute("output_types", *map_node, &second_map_node);
graph_utils::CopyAttribute("output_shapes", *map_node, &second_map_node);
if (!is_map_and_batch) {
AddNodeAttr("deterministic", "true", &second_map_node);
}
for (auto key : {"use_inter_op_parallelism", "preserve_cardinality"}) {
if (gtl::FindOrNull(map_node->attr(), key)) {
graph_utils::CopyAttribute(key, *map_node, &second_map_node);
}
}
second_map_node_ptr = graph->AddNode(std::move(second_map_node));
}
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(map_node->name(), second_map_node_ptr->name()));
*graph->graph()->mutable_library()->mutable_function()->Add() =
split_results.first_function;
*graph->graph()->mutable_library()->mutable_function()->Add() =
split_results.second_function;
return absl::OkStatus();
}
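// Rewrites a ParallelBatchDataset node in place into BatchDatasetV2 by moving
// drop_remainder into the num_parallel_calls input slot and keeping the
// original num_parallel_calls tensor as a control input.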
Status ConvertBatch(const string& node_name, MutableGraphView* graph) {
NodeDef* node = GetMutableNode(node_name, graph);
node->set_op(kBatchV2Op);
std::string num_parallel_calls_input = node->input(2);
node->set_input(2, node->input(3));
node->set_input(3, absl::StrCat("^", num_parallel_calls_input));
node->mutable_attr()->erase("deterministic");
return absl::OkStatus();
}
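// Rewrites a MapAndBatchDataset node into separate MapDataset and
// BatchDatasetV2 nodes. The Map node keeps the function and its captured
// arguments, the Batch node consumes the Map output together with the
// original batch_size and drop_remainder inputs, and the leading batch
// dimension is stripped from the Map node's output shapes.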
Status ConvertMapAndBatch(const string& node_name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(node_name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << node_name
<< " in the optimized graph.";
const NodeDef& orig_node = graph->graph()->node(index);
auto Targuments = orig_node.attr().find("Targuments");
if (Targuments == orig_node.attr().end()) {
return errors::Internal("Failed to find Targuments attribute for node ",
node_name);
}
NodeDef new_map_node;
new_map_node.set_op(kMapOp);
graph_utils::SetUniqueGraphNodeName(kMapOp, graph->graph(), &new_map_node);
int num_map_inputs = 1 + Targuments->second.list().type_size();
for (int i = 0; i < num_map_inputs; i++) {
new_map_node.add_input(orig_node.input(i));
}
for (int i = num_map_inputs; i < orig_node.input_size(); i++) {
if (IsControlInput(orig_node.input(i))) {
new_map_node.add_input(orig_node.input(i));
} else {
new_map_node.add_input(absl::StrCat("^", orig_node.input(i)));
}
}
for (auto key : {"f", "Targuments", "output_types"}) {
graph_utils::CopyAttribute(key, orig_node, &new_map_node);
}
for (auto key : {"preserve_cardinality"}) {
if (gtl::FindOrNull(new_map_node.attr(), key)) {
graph_utils::CopyAttribute(key, orig_node, &new_map_node);
}
}
auto orig_output_shapes = orig_node.attr().find("output_shapes");
if (orig_output_shapes == orig_node.attr().end()) {
return errors::Internal("Failed to find output_shapes attribute for node ",
node_name);
}
AttrValue& map_output_shapes =
(*new_map_node.mutable_attr())["output_shapes"];
for (const TensorShapeProto& orig_shape :
orig_output_shapes->second.list().shape()) {
TensorShapeProto* new_shape = map_output_shapes.mutable_list()->add_shape();
if (orig_shape.unknown_rank()) {
new_shape->set_unknown_rank(true);
} else if (orig_shape.dim_size() == 0) {
return errors::Internal(
"Output shape of MapAndBatch node cannot be scalar");
} else {
for (int i = 1; i < orig_shape.dim_size(); i++) {
*new_shape->add_dim() = orig_shape.dim(i);
}
}
}
NodeDef new_batch_node;
new_batch_node.set_op(kBatchV2Op);
graph_utils::SetUniqueGraphNodeName(kBatchOp, graph->graph(),
&new_batch_node);
  new_batch_node.add_input(new_map_node.name());
  // The MapAndBatch inputs following the captured arguments are batch_size,
  // num_parallel_calls and drop_remainder; num_parallel_calls is dropped here.
  new_batch_node.add_input(orig_node.input(num_map_inputs));
  new_batch_node.add_input(
      orig_node.input(num_map_inputs + 2));
graph_utils::CopyShapesAndTypesAttrs(orig_node, &new_batch_node);
graph->AddNode(std::move(new_map_node));
NodeDef* graph_batch_node = graph->AddNode(std::move(new_batch_node));
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(orig_node.name(), graph_batch_node->name()));
return absl::OkStatus();
}
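// Makes a PrefetchDataset node synchronous by replacing its buffer_size input
// with a constant 0, keeping the original buffer_size tensor as a control
// input.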
Status ConvertPrefetch(const string& node_name, MutableGraphView* graph) {
NodeDef* node = GetMutableNode(node_name, graph);
constexpr int buffer_size_index = 1;
node->add_input(absl::StrCat("^", node->input(buffer_size_index)));
NodeDef* tmp = graph_utils::AddScalarConstNode<int64_t>(0, graph);
node->set_input(buffer_size_index, tmp->name());
return absl::OkStatus();
}
enum class NondeterminismType { PARALLELISM, ASYNCHRONY };
bool IsDeterministicStatefulOp(NondeterminismType type,
const std::string& stateful_op) {
return type == NondeterminismType::PARALLELISM
? IsDeterministicWhenRunInParallel(stateful_op)
: IsDeterministicWhenRunAsynchronously(stateful_op);
}
bool FunctionNodeMayIntroduceNondeterminism(
const FunctionLibraryDefinition& library, const NodeDef& node_def,
NondeterminismType nondeterminism_type,
absl::flat_hash_set<std::string>* functions_processed);
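// Returns true if `function_name` may introduce nondeterminism of the given
// type. If `nondeterministic_nodes` is non-null, every offending node in the
// function is collected into it instead of returning at the first match.
// Functions that cannot be found are conservatively treated as
// nondeterministic.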
bool FunctionMayIntroduceNondeterminism(
const FunctionLibraryDefinition& library, const std::string& function_name,
NondeterminismType nondeterminism_type,
absl::flat_hash_set<std::string>* functions_processed,
absl::flat_hash_set<absl::string_view>* nondeterministic_nodes) {
if (functions_processed->contains(function_name)) {
return false;
}
functions_processed->insert(function_name);
const FunctionDef* function_def = library.Find(function_name);
if (!function_def) {
VLOG(2) << "Could not look up function " << function_name
<< " in FunctionLibraryDefinition, so rewriting op to be safe";
return true;
}
bool found = false;
for (const NodeDef& node_def : function_def->node_def()) {
bool nondeterministic = FunctionNodeMayIntroduceNondeterminism(
library, node_def, nondeterminism_type, functions_processed);
if (nondeterministic) {
if (nondeterministic_nodes) {
nondeterministic_nodes->insert(node_def.name());
found = true;
} else {
return true;
}
}
}
return found;
}
bool FunctionMayIntroduceNondeterminism(
const FunctionLibraryDefinition& library, const std::string& function_name,
NondeterminismType nondeterminism_type) {
absl::flat_hash_set<string> functions_processed;
return FunctionMayIntroduceNondeterminism(library, function_name,
nondeterminism_type,
&functions_processed, nullptr);
}
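// Returns true if a single node within a function may introduce
// nondeterminism: either it is a stateful op not known to be deterministic
// under `nondeterminism_type`, or any function it references (via a func attr
// or as a function op) may be nondeterministic. Control-flow and
// partitioned-call ops are not flagged directly; only the functions they
// invoke are checked. Ops that cannot be looked up are treated as
// nondeterministic.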
bool FunctionNodeMayIntroduceNondeterminism(
const FunctionLibraryDefinition& library, const NodeDef& node_def,
NondeterminismType nondeterminism_type,
absl::flat_hash_set<std::string>* functions_processed) {
const OpRegistrationData* op_reg_data = nullptr;
Status s = library.LookUp(node_def.op(), &op_reg_data);
if (!s.ok()) {
VLOG(2) << "Could not look up op " << node_def.op()
<< " in FunctionLibraryDefinition, so rewriting op to be safe";
return true;
}
bool is_function_op = op_reg_data->is_function_op;
bool is_stateful = false;
if (!is_function_op) {
const OpDef* op_def;
s = OpRegistry::Global()->LookUpOpDef(node_def.op(), &op_def);
if (!s.ok()) {
VLOG(2) << "Could not look up op " << node_def.op()
<< " in OpRegistry, so rewriting op to be safe";
return true;
}
is_stateful = op_def->is_stateful();
}
if (is_stateful && !IsStatefulPartitionedCall((node_def)) &&
!IsIf(node_def) && !IsWhile(node_def) &&
!IsDeterministicStatefulOp(nondeterminism_type, node_def.op())) {
VLOG(2) << "Will rewrite due to op: " << node_def.op();
return true;
}
std::vector<std::string> attr_func_names;
for (const auto& attr : node_def.attr()) {
if (attr.second.has_func()) {
attr_func_names.push_back(attr.second.func().name());
}
for (const auto& name_attr_list : attr.second.list().func()) {
attr_func_names.push_back(name_attr_list.name());
}
}
if (is_function_op) {
attr_func_names.push_back(node_def.op());
}
for (const std::string& inner_function_name : attr_func_names) {
if (FunctionMayIntroduceNondeterminism(library, inner_function_name,
nondeterminism_type,
functions_processed, nullptr)) {
return true;
}
}
return false;
}
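// Returns true if `node` is a dataset op referencing a function that may
// introduce nondeterminism when executed asynchronously. Ops missing from the
// registry are ignored; other lookup failures are treated conservatively as
// nondeterministic.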
bool NodeMayIntroduceNondeterminismWhenAsync(
const FunctionLibraryDefinition& library, const NodeDef& node) {
const OpDef* op_def;
Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def);
if (s.code() == error::NOT_FOUND) {
return false;
} else if (!s.ok()) {
return true;
}
if (data::DatasetOpKernel::IsDatasetOp(*op_def)) {
std::vector<std::string> attr_func_names;
for (const auto& attr : node.attr()) {
if (attr.second.has_func()) {
attr_func_names.push_back(attr.second.func().name());
}
for (const auto& name_attr_list : attr.second.list().func()) {
attr_func_names.push_back(name_attr_list.name());
}
}
for (const std::string& inner_function_name : attr_func_names) {
if (FunctionMayIntroduceNondeterminism(library, inner_function_name,
NondeterminismType::ASYNCHRONY)) {
return true;
}
}
}
return false;
}
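// Returns true if any node in the graph or in any library function may
// introduce nondeterminism under asynchrony. The result decides whether
// asynchronous dataset ops (Prefetch, ParallelBatch, MapAndBatch, ...) must
// also be rewritten.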
bool GraphMayHaveAsyncNondeterminism(const FunctionLibraryDefinition& library,
const GraphDef& graph) {
for (const NodeDef& node : graph.node()) {
if (NodeMayIntroduceNondeterminismWhenAsync(library, node)) {
return true;
}
}
for (const string& function_name : library.ListFunctionNames()) {
const FunctionDef* function_def = library.Find(function_name);
CHECK(function_def);
for (const NodeDef& node : function_def->node_def()) {
if (NodeMayIntroduceNondeterminismWhenAsync(library, node)) {
return true;
}
}
}
return false;
}
}
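// Rewrites the graph so tf.data pipelines produce elements deterministically:
// "sloppy"/"deterministic" attributes are forced to their deterministic
// values, and parallel or asynchronous dataset ops whose functions may
// introduce nondeterminism are either split (preserving some parallelism) or
// converted to their nonparallel, synchronous counterparts, e.g. a
// ParallelMapDatasetV2 running a stateful function becomes a MapDataset.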
Status MakeDeterministic::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
absl::flat_hash_set<string> nodes_to_delete;
bool remove_async_nodes =
GraphMayHaveAsyncNondeterminism(function_library, item.graph);
for (const NodeDef& node : item.graph.node()) {
if (graph_utils::HasSloppyAttr(node.op())) {
NodeDef* mutable_node = GetMutableNode(node.name(), &graph);
(*mutable_node->mutable_attr())["sloppy"].set_b(false);
stats->num_changes++;
}
if (graph_utils::HasDeterministicAttr(node.op())) {
NodeDef* mutable_node = GetMutableNode(node.name(), &graph);
(*mutable_node->mutable_attr())["deterministic"].set_s("true");
stats->num_changes++;
}
bool rewrite_due_to_async =
IntroducesAsynchrony(node.op()) && remove_async_nodes;
absl::flat_hash_set<std::string> functions_processed;
absl::flat_hash_set<absl::string_view> nondeterministic_nodes;
bool rewrite_due_to_parallelism =
IntroducesFunctionParallelism(node.op()) &&
FunctionMayIntroduceNondeterminism(
function_library, node.attr().at("f").func().name(),
NondeterminismType::PARALLELISM, &functions_processed,
&nondeterministic_nodes);
if (!rewrite_due_to_async && !rewrite_due_to_parallelism) {
continue;
}
VLOG(1) << "Rewriting node " << node.name() << " (" << node.op()
<< ") because it introduces nondeterminism through "
<< (rewrite_due_to_async ? "asynchrony" : "parallelism");
bool maybe_can_split =
!rewrite_due_to_async &&
(node.op() == kParallelMapOpV2 || IsMapAndBatch(node.op()));
if (maybe_can_split) {
      Status s = SplitMap(function_library, node.name(), &graph,
                          nondeterministic_nodes); | #include "tensorflow/core/grappler/optimizers/data/make_deterministic.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
std::vector<string> GetNodeNames(const FunctionDef& func) {
std::vector<string> node_names;
for (const NodeDef& node : func.node_def()) {
node_names.push_back(node.name());
}
return node_names;
}
class SplitMapTest : public ::testing::TestWithParam<std::tuple<bool, bool>> {};
TEST_P(SplitMapTest, SplitMapFunction) {
using test::function::NDef;
GrapplerItem item;
bool deterministic, rewrite_map_and_batch;
std::tie(deterministic, rewrite_map_and_batch) = GetParam();
if (deterministic && rewrite_map_and_batch) {
LOG(INFO) << "Skipping test because MapAndBatch does not have "
"'deterministic' attribute";
return;
}
FunctionDef orig_func_def = FunctionDefHelper::Create(
"MyFunction",
{"a1: float", "a2: float", "a3: double"},
{"o1: float", "o2: double"},
{},
{
{{"i1"}, "Identity", {"a2"}, {{"T", DT_FLOAT}}},
{{"i2"}, "Identity", {"i1:output"}, {{"T", DT_FLOAT}}},
{{"stateful"},
"SampleDistortedBoundingBox",
{"a1", "i2:output"},
{{"T", DT_FLOAT}}},
{{"i3"}, "Identity", {"stateful:bboxes:0"}, {{"T", DT_FLOAT}}},
{{"i4"}, "Identity", {"a3"}, {{"T", DT_DOUBLE}}},
},
{{"o1", "i3:output"}, {"o2", "i4:output"}});
NodeDef orig_map_node_def;
if (rewrite_map_and_batch) {
orig_map_node_def = graph_tests_utils::MakeMapAndBatchNode(
"map", "range", "batch_size", "num_parallel_calls", "drop_remainder",
"MyFunction");
} else {
orig_map_node_def = graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", "MyFunction",
deterministic ? "true" : "false");
}
orig_map_node_def.add_input("^start");
AttrValue* attr_val = &(*orig_map_node_def.mutable_attr())["Targuments"];
SetAttrValue(std::vector<DataType>{DT_DOUBLE}, attr_val);
(*orig_map_node_def.mutable_attr())["preserve_cardinality"].set_b(true);
attr_val = &(*orig_map_node_def.mutable_attr())["output_types"];
SetAttrValue(std::vector<DataType>{DT_FLOAT, DT_DOUBLE}, attr_val);
attr_val = &(*orig_map_node_def.mutable_attr())["output_shapes"];
SetAttrValue(std::vector<TensorShape>{{1}, {1}}, attr_val);
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
orig_map_node_def},
{orig_func_def});
MakeDeterministic optimizer;
GraphDef output;
VLOG(1) << "GraphDef before optimization:\n"
<< item.graph.DebugString() << "\n\n";
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
VLOG(1) << "GraphDef after optimization:\n" << output.DebugString() << "\n\n";
int index = graph_utils::FindGraphNodeWithOp("MapDataset", output);
ASSERT_GE(index, 0);
NodeDef first_map_node_def = output.node(index);
if (rewrite_map_and_batch) {
ASSERT_THAT(
first_map_node_def.input(),
::testing::ElementsAre("range", "^batch_size", "^num_parallel_calls",
"^drop_remainder", "^start"));
} else {
ASSERT_THAT(
first_map_node_def.input(),
::testing::ElementsAre("range", "^num_parallel_calls", "^start"));
}
std::vector<DataType> t_arguments;
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "Targuments", &t_arguments));
ASSERT_THAT(t_arguments, ::testing::ElementsAre(DT_DOUBLE));
std::vector<DataType> output_types;
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "output_types", &output_types));
ASSERT_THAT(output_types, ::testing::ElementsAre(DT_FLOAT));
std::vector<TensorShapeProto> output_shapes;
TF_ASSERT_OK(
GetNodeAttr(first_map_node_def, "output_shapes", &output_shapes));
for (const TensorShapeProto& shape : output_shapes) {
ASSERT_TRUE(shape.unknown_rank());
}
bool preserve_cardinality;
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "preserve_cardinality",
&preserve_cardinality));
ASSERT_TRUE(preserve_cardinality);
NameAttrList f;
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "f", &f));
ASSERT_EQ(f.attr_size(), 0);
index = graph_utils::FindGraphFunctionWithName(f.name(), output.library());
CHECK_GE(index, 0);
FunctionDef first_func = output.library().function(index);
ASSERT_TRUE(first_func.signature().is_stateful());
ASSERT_THAT(GetNodeNames(first_func),
::testing::UnorderedElementsAre("i1", "i2", "stateful"));
NodeDef second_map_node_def;
if (rewrite_map_and_batch) {
index = graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output);
CHECK_GE(index, 0);
second_map_node_def = output.node(index);
ASSERT_THAT(second_map_node_def.input(),
::testing::ElementsAre(first_map_node_def.name(), "batch_size",
"num_parallel_calls", "drop_remainder",
"^start"));
} else {
index = graph_utils::FindGraphNodeWithOp("ParallelMapDatasetV2", output);
CHECK_GE(index, 0);
second_map_node_def = output.node(index);
ASSERT_THAT(second_map_node_def.input(),
::testing::ElementsAre(first_map_node_def.name(),
"num_parallel_calls", "^start"));
ASSERT_EQ(second_map_node_def.attr().at("deterministic").s(), "true");
}
t_arguments.clear();
TF_ASSERT_OK(GetNodeAttr(second_map_node_def, "Targuments", &t_arguments));
ASSERT_THAT(t_arguments, ::testing::ElementsAre(DT_DOUBLE));
output_types.clear();
TF_ASSERT_OK(GetNodeAttr(second_map_node_def, "output_types", &output_types));
ASSERT_THAT(output_types, ::testing::ElementsAre(DT_FLOAT, DT_DOUBLE));
output_shapes.clear();
TF_ASSERT_OK(
GetNodeAttr(first_map_node_def, "output_shapes", &output_shapes));
for (const TensorShapeProto& shape : output_shapes) {
ASSERT_EQ(shape.dim_size(), 0);
}
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "preserve_cardinality",
&preserve_cardinality));
ASSERT_TRUE(preserve_cardinality);
TF_ASSERT_OK(GetNodeAttr(second_map_node_def, "f", &f));
ASSERT_EQ(f.attr_size(), 0);
index = graph_utils::FindGraphFunctionWithName(f.name(), output.library());
CHECK_GE(index, 0);
FunctionDef second_func = output.library().function(index);
ASSERT_THAT(GetNodeNames(second_func),
::testing::UnorderedElementsAre("i3", "i4"));
}
INSTANTIATE_TEST_SUITE_P(Test, SplitMapTest,
::testing::Combine(::testing::Bool(),
::testing::Bool()));
FunctionDef OuterXTimesTwo() {
return FunctionDefHelper::Define(
"OuterXTimesTwo",
{"x: float"},
{"y: float"},
{},
{{{"y"},
"PartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f",
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_FLOAT}})}}}});
}
FunctionDef OuterRandomUniform() {
return FunctionDefHelper::Define(
"OuterRandomUniform",
{"x: float"},
{"random_uniform: int64"},
{},
{{{"random_uniform"},
"StatefulPartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_INT64}},
{"f", FunctionDefHelper::FunctionRef("RandomUniformFn",
{{"T", DT_FLOAT}})}}}});
}
FunctionDef OuterReadResourceVariable() {
return FunctionDefHelper::Define(
"OuterReadResourceVariable",
{"x: resource"},
{"y: float"},
{},
{{{"y"},
"StatefulPartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FunctionDefHelper::FunctionRef("ReadResourceVariable", {})}}}});
}
class MakeDeterministicTest
: public ::testing::TestWithParam<std::tuple<bool, bool>> {};
TEST_P(MakeDeterministicTest, NoRewriteInterleave) {
using test::function::NDef;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterXTimesTwo" : "XTimesTwo";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV2Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", func_name, !deterministic)},
{test::function::XTimesTwo(), OuterXTimesTwo()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("interleave", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "ParallelInterleaveDatasetV2");
ASSERT_EQ(node_def.attr().at("sloppy").b(), false);
}
TEST_P(MakeDeterministicTest, NoRewriteMap) {
using test::function::NDef;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterXTimesTwo" : "XTimesTwo";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", func_name,
deterministic ? "true" : "false")},
{test::function::XTimesTwo(), OuterXTimesTwo()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("map", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "ParallelMapDatasetV2");
ASSERT_EQ(node_def.attr().at("deterministic").s(), "true");
}
TEST_P(MakeDeterministicTest, NoRewriteBatch) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterRandomUniform" : "RandomUniformFn";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeMapNode("map", "range", func_name),
graph_tests_utils::MakeParallelBatchNode(
"batch", "map", "batch_size", "num_parallel_calls", "drop_remainder",
deterministic ? "true" : "false")},
{test::function::RandomUniform(), OuterRandomUniform()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("batch", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "ParallelBatchDataset");
ASSERT_EQ(node_def.attr().at("deterministic").s(), "true");
}
TEST_P(MakeDeterministicTest, NoRewritePrefetch) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterRandomUniform" : "RandomUniformFn";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
NDef("buffer_size", "Const", {},
{{"value", Tensor(int64_t{1})}, {"dtype", DT_INT64}}),
graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", func_name,
deterministic ? "true" : "false"),
graph_tests_utils::MakePrefetchNode("prefetch", "map", "buffer_size")},
{test::function::RandomUniform(), OuterRandomUniform()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("prefetch", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "PrefetchDataset");
ASSERT_EQ(node_def.input_size(), 2);
ASSERT_THAT(node_def.input(0), ::testing::EndsWith("map"));
ASSERT_EQ(node_def.input(1), "buffer_size");
NodeDef buffer_size =
output.node(graph_utils::FindGraphNodeWithName("buffer_size", output));
EXPECT_EQ(buffer_size.attr().at("value").tensor().int64_val(0), 1);
}
TEST_P(MakeDeterministicTest, RewriteInterleave) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterRandomUniform" : "RandomUniformFn";
NodeDef interleave_node_def = graph_tests_utils::MakeParallelInterleaveV2Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", func_name, !deterministic);
interleave_node_def.add_input("^start");
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
interleave_node_def},
{test::function::RandomUniform(), OuterRandomUniform()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithOp("InterleaveDataset", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.input_size(), 5);
ASSERT_EQ(node_def.input(0), "range");
ASSERT_EQ(node_def.input(1), "cycle_length");
ASSERT_EQ(node_def.input(2), "block_length");
ASSERT_EQ(node_def.input(3), "^num_parallel_calls");
ASSERT_EQ(node_def.input(4), "^start");
}
enum CannotSplitReason { FUNC_HAS_ATTR, ASYNC_NONDETERMINISM };
class RewriteMapWithoutSplitTest
: public ::testing::TestWithParam<
std::tuple<bool, bool, CannotSplitReason>> {};
TEST_P(RewriteMapWithoutSplitTest, RewriteMapWithoutSplit) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
CannotSplitReason reason;
std::tie(nest, deterministic, reason) = GetParam();
FunctionDef func;
FunctionDef outer_func;
if (reason == FUNC_HAS_ATTR) {
func = test::function::RandomUniform();
(*func.mutable_attr())["test_attr"].set_s("test_value");
outer_func = OuterRandomUniform();
(*outer_func.mutable_attr())["test_attr"].set_s("test_value");
} else {
func = test::function::ReadResourceVariable();
outer_func = OuterReadResourceVariable();
}
std::string func_name =
nest ? outer_func.signature().name() : func.signature().name();
NodeDef map_node_def = graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", func_name,
deterministic ? "true" : "false");
map_node_def.add_input("^start");
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
map_node_def},
{func, outer_func});
VLOG(1) << "Orig graph: \n" << item.graph.DebugString() << "\n\n";
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithOp("MapDataset", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.input_size(), 3);
ASSERT_EQ(node_def.input(0), "range");
ASSERT_EQ(node_def.input(1), "^num_parallel_calls");
ASSERT_EQ(node_def.input(2), "^start");
NameAttrList f;
TF_ASSERT_OK(GetNodeAttr(node_def, "f", &f));
ASSERT_EQ(f.name(), func_name);
ASSERT_FALSE(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output));
}
TEST_P(MakeDeterministicTest, RewriteBatch) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name =
nest ? "OuterReadResourceVariable" : "ReadResourceVariable";
NodeDef batch_node_def = graph_tests_utils::MakeParallelBatchNode(
"batch", "map", "batch_size", "num_parallel_calls", "drop_remainder",
deterministic ? "true" : "false");
batch_node_def.add_input("^start");
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeMapNode("map", "range", func_name),
batch_node_def},
{test::function::ReadResourceVariable(), OuterReadResourceVariable()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithOp("BatchDatasetV2", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.input_size(), 5);
ASSERT_EQ(node_def.input(0), "map");
ASSERT_EQ(node_def.input(1), "batch_size");
ASSERT_EQ(node_def.input(2), "drop_remainder");
ASSERT_EQ(node_def.input(3), "^num_parallel_calls");
ASSERT_EQ(node_def.input(4), "^start");
ASSERT_EQ(node_def.attr().count("deterministic"), 0);
}
TEST_P(MakeDeterministicTest, RewritePrefetch) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name =
nest ? "OuterReadResourceVariable" : "ReadResourceVariable";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
NDef("buffer_size", "Const", {},
{{"value", Tensor(int64_t{1})}, {"dtype", DT_INT64}}),
graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", func_name,
deterministic ? "true" : "false"),
graph_tests_utils::MakePrefetchNode("prefetch", "map", "buffer_size")},
{test::function::ReadResourceVariable(), OuterReadResourceVariable()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("prefetch", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "PrefetchDataset");
ASSERT_EQ(node_def.input_size(), 3);
ASSERT_THAT(node_def.input(0), ::testing::EndsWith("map"));
ASSERT_EQ(node_def.input(2), "^buffer_size");
NodeDef buffer_size = output.node(
graph_utils::FindGraphNodeWithName(node_def.input(1), output));
EXPECT_EQ(buffer_size.attr().at("value").tensor().int64_val(0), 0);
}
INSTANTIATE_TEST_SUITE_P(Test, MakeDeterministicTest,
::testing::Combine(::testing::Bool(),
::testing::Bool()));
INSTANTIATE_TEST_SUITE_P(
Test, RewriteMapWithoutSplitTest,
::testing::Combine(::testing::Bool(), ::testing::Bool(),
::testing::Values(FUNC_HAS_ATTR, ASYNC_NONDETERMINISM)));
TEST(NoRewriteMapAndBatchTest, NoRewriteMapAndBatch) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT64}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT64}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeMapAndBatchNode(
"map_and_batch", "range", "batch_size", "num_parallel_calls",
"drop_remainder", "XTimesTwo")},
{test::function::XTimesTwo()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("map_and_batch", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.input_size(), 4);
ASSERT_EQ(node_def.input(0), "range");
ASSERT_EQ(node_def.input(1), "batch_size");
ASSERT_EQ(node_def.input(2), "num_parallel_calls");
ASSERT_EQ(node_def.input(3), "drop_remainder");
}
class RewriteMapAndBatchWithoutSplitTest
: public ::testing::TestWithParam<std::tuple<bool, CannotSplitReason>> {};
TEST_P(RewriteMapAndBatchWithoutSplitTest, RewriteMapAndBatchWithoutSplit) {
using test::function::NDef;
GrapplerItem item;
bool nest;
CannotSplitReason reason;
std::tie(nest, reason) = GetParam();
FunctionDef func;
if (reason == FUNC_HAS_ATTR) {
func = test::function::RandomUniform();
(*func.mutable_attr())["test_attr"].set_s("test_value");
} else {
func = test::function::ReadResourceVariable();
}
NodeDef map_and_batch_node_def = graph_tests_utils::MakeMapAndBatchNode(
"map_and_batch", "range", "batch_size", "num_parallel_calls",
"drop_remainder", func.signature().name());
SetAttrValue(
absl::Span<const PartialTensorShape>{
{2}, {-1, 3, -1}, PartialTensorShape()},
&(*map_and_batch_node_def.mutable_attr())["output_shapes"]);
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT64}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
map_and_batch_node_def},
{func});
VLOG(1) << "Orig graph: \n" << item.graph.DebugString() << "\n\n";
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_FALSE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
int index = graph_utils::FindGraphNodeWithOp("MapDataset", output);
ASSERT_GE(index, 0);
NodeDef map_node_def = output.node(index);
ASSERT_EQ(map_node_def.input_size(), 4);
ASSERT_EQ(map_node_def.input(0), "range");
ASSERT_EQ(map_node_def.input(1), "^batch_size");
ASSERT_EQ(map_node_def.input(2), "^num_parallel_calls");
ASSERT_EQ(map_node_def.input(3), "^drop_remainder");
ASSERT_TRUE(AreAttrValuesEqual(map_and_batch_node_def.attr().at("f"),
map_node_def.attr().at("f")));
ASSERT_TRUE(AreAttrValuesEqual(map_and_batch_node_def.attr().at("Targuments"),
map_node_def.attr().at("Targuments")));
ASSERT_TRUE(
AreAttrValuesEqual(map_and_batch_node_def.attr().at("output_types"),
map_node_def.attr().at("output_types")));
ASSERT_EQ(map_node_def.attr().at("output_shapes").list().shape_size(), 3);
ASSERT_TRUE(PartialTensorShape({}).IsIdenticalTo(
map_node_def.attr().at("output_shapes").list().shape(0)));
ASSERT_TRUE(PartialTensorShape({3, -1}).IsIdenticalTo(
map_node_def.attr().at("output_shapes").list().shape(1)));
ASSERT_TRUE(PartialTensorShape().IsIdenticalTo(
map_node_def.attr().at("output_shapes").list().shape(2)));
index = graph_utils::FindGraphNodeWithOp("BatchDatasetV2", output);
ASSERT_GE(index, 0);
NodeDef batch_node_def = output.node(index);
ASSERT_EQ(batch_node_def.input_size(), 3);
ASSERT_EQ(batch_node_def.input(0), map_node_def.name());
ASSERT_EQ(batch_node_def.input(1), "batch_size");
ASSERT_EQ(batch_node_def.input(2), "drop_remainder");
ASSERT_TRUE(
AreAttrValuesEqual(map_and_batch_node_def.attr().at("output_types"),
batch_node_def.attr().at("output_types")));
ASSERT_TRUE(
AreAttrValuesEqual(map_and_batch_node_def.attr().at("output_shapes"),
batch_node_def.attr().at("output_shapes")));
}
INSTANTIATE_TEST_SUITE_P(
Test, RewriteMapAndBatchWithoutSplitTest,
::testing::Combine(::testing::Bool(),
::testing::Values(FUNC_HAS_ATTR, ASYNC_NONDETERMINISM)));
}
}
} |
1,414 | cpp | tensorflow/tensorflow | shuffle_and_repeat_fusion | tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc | tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_SHUFFLE_AND_REPEAT_FUSION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_SHUFFLE_AND_REPEAT_FUSION_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
class ShuffleAndRepeatFusion : public TFDataOptimizerBase {
public:
ShuffleAndRepeatFusion() = default;
~ShuffleAndRepeatFusion() override = default;
string name() const override { return "shuffle_and_repeat_fusion"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kShuffleDataset[] = "ShuffleDataset";
constexpr char kShuffleDatasetV2[] = "ShuffleDatasetV2";
constexpr char kShuffleDatasetV3[] = "ShuffleDatasetV3";
constexpr char kRepeatDataset[] = "RepeatDataset";
constexpr char kShuffleAndRepeatDataset[] = "ShuffleAndRepeatDataset";
constexpr char kShuffleAndRepeatDatasetV2[] = "ShuffleAndRepeatDatasetV2";
constexpr char kReshuffleEachIteration[] = "reshuffle_each_iteration";
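// Fuses ShuffleDataset(input, buffer_size, seed, seed2) with a downstream
// RepeatDataset(_, count) into a single ShuffleAndRepeatDataset, copying the
// output shapes/types and reshuffle_each_iteration from the shuffle node.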
Status FuseShuffleV1AndRepeat(const NodeDef& shuffle_node,
const NodeDef& repeat_node,
MutableGraphView* graph, GraphDef* output,
NodeDef* fused_node) {
fused_node->set_op(kShuffleAndRepeatDataset);
graph_utils::SetUniqueGraphNodeName(kShuffleAndRepeatDataset, output,
fused_node);
fused_node->add_input(shuffle_node.input(0));
fused_node->add_input(shuffle_node.input(1));
fused_node->add_input(shuffle_node.input(2));
fused_node->add_input(shuffle_node.input(3));
fused_node->add_input(repeat_node.input(1));
graph_utils::CopyShapesAndTypesAttrs(shuffle_node, fused_node);
graph_utils::CopyAttribute(kReshuffleEachIteration, shuffle_node, fused_node);
graph_utils::MaybeSetFusedMetadata(shuffle_node, repeat_node, fused_node);
return absl::OkStatus();
}
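// Fuses ShuffleDatasetV2(input, buffer_size, seed_generator) with a
// downstream RepeatDataset into ShuffleAndRepeatDatasetV2. V2 shuffles carry
// their seeds in the seed generator resource, so constant 0 seeds are
// inserted and reshuffle_each_iteration is set to true on the fused node.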
Status FuseShuffleV2AndRepeat(const NodeDef& shuffle_node,
const NodeDef& repeat_node,
MutableGraphView* graph, GraphDef* output,
NodeDef* fused_node) {
fused_node->set_op(kShuffleAndRepeatDatasetV2);
graph_utils::SetUniqueGraphNodeName(kShuffleAndRepeatDatasetV2, output,
fused_node);
NodeDef zero_node = *graph_utils::AddScalarConstNode<int64_t>(0, graph);
fused_node->add_input(shuffle_node.input(0));
fused_node->add_input(shuffle_node.input(1));
fused_node->add_input(zero_node.name());
fused_node->add_input(zero_node.name());
fused_node->add_input(repeat_node.input(1));
fused_node->add_input(shuffle_node.input(2));
graph_utils::CopyShapesAndTypesAttrs(shuffle_node, fused_node);
(*fused_node->mutable_attr())[kReshuffleEachIteration].set_b(true);
graph_utils::MaybeSetFusedMetadata(shuffle_node, repeat_node, fused_node);
return absl::OkStatus();
}
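// Fuses ShuffleDatasetV3(input, buffer_size, seed, seed2, seed_generator)
// with a downstream RepeatDataset into ShuffleAndRepeatDatasetV2, forwarding
// the explicit seeds, the repeat count and the seed generator resource.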
Status FuseShuffleV3AndRepeat(const NodeDef& shuffle_node,
const NodeDef& repeat_node,
MutableGraphView* graph, GraphDef* output,
NodeDef* fused_node) {
fused_node->set_op(kShuffleAndRepeatDatasetV2);
graph_utils::SetUniqueGraphNodeName(kShuffleAndRepeatDataset, output,
fused_node);
fused_node->add_input(shuffle_node.input(0));
fused_node->add_input(shuffle_node.input(1));
fused_node->add_input(shuffle_node.input(2));
fused_node->add_input(shuffle_node.input(3));
fused_node->add_input(repeat_node.input(1));
fused_node->add_input(shuffle_node.input(4));
graph_utils::CopyShapesAndTypesAttrs(shuffle_node, fused_node);
graph_utils::CopyAttribute(kReshuffleEachIteration, shuffle_node, fused_node);
graph_utils::MaybeSetFusedMetadata(shuffle_node, repeat_node, fused_node);
return absl::OkStatus();
}
}
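// Looks for RepeatDataset nodes whose input is a ShuffleDataset (V1/V2/V3),
// replaces each such pair with the corresponding fused node, rewires the
// fanouts, and deletes the original nodes unless they must be preserved.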
Status ShuffleAndRepeatFusion::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
for (const NodeDef& repeat_node : item.graph.node()) {
if (repeat_node.op() != kRepeatDataset) {
continue;
}
const NodeDef& shuffle_node =
*graph_utils::GetInputNode(repeat_node, graph);
NodeDef fused_node;
if (shuffle_node.op() == kShuffleDataset) {
TF_RETURN_IF_ERROR(FuseShuffleV1AndRepeat(shuffle_node, repeat_node,
&graph, output, &fused_node));
} else if (shuffle_node.op() == kShuffleDatasetV2) {
TF_RETURN_IF_ERROR(FuseShuffleV2AndRepeat(shuffle_node, repeat_node,
&graph, output, &fused_node));
} else if (shuffle_node.op() == kShuffleDatasetV3) {
TF_RETURN_IF_ERROR(FuseShuffleV3AndRepeat(shuffle_node, repeat_node,
&graph, output, &fused_node));
} else {
continue;
}
NodeDef& shuffle_and_repeat_node = *graph.AddNode(std::move(fused_node));
TF_RETURN_IF_ERROR(graph.UpdateFanouts(repeat_node.name(),
shuffle_and_repeat_node.name()));
TF_RETURN_IF_ERROR(graph.UpdateFanouts(shuffle_node.name(),
shuffle_and_repeat_node.name()));
const auto nodes_to_preserve = item.NodesToPreserve();
if (nodes_to_preserve.find(shuffle_node.name()) ==
nodes_to_preserve.end() &&
nodes_to_preserve.find(repeat_node.name()) == nodes_to_preserve.end()) {
nodes_to_delete.insert(shuffle_node.name());
nodes_to_delete.insert(repeat_node.name());
}
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(ShuffleAndRepeatFusion,
"shuffle_and_repeat_fusion");
}
} | #include "tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
constexpr char kReshuffleEachIteration[] = "reshuffle_each_iteration";
TEST(ShuffleAndRepeatFusionTest, FuseShuffleV1AndRepeat) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
std::vector<std::pair<string, AttrValue>> common_attrs(2);
AttrValue shapes_attr;
SetAttrValue(kOutputShapes, &shapes_attr);
common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr);
AttrValue types_attr;
SetAttrValue(kOutputTypes, &types_attr);
common_attrs[1] = std::make_pair(kOutputTypes, types_attr);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
common_attrs, &graph);
NodeDef *buffer_size_node =
graph_utils::AddScalarConstNode<int64_t>(128, &graph);
NodeDef *seed_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
NodeDef *seed2_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
std::vector<string> shuffle_inputs(4);
shuffle_inputs[0] = range_node->name();
shuffle_inputs[1] = buffer_size_node->name();
shuffle_inputs[2] = seed_node->name();
shuffle_inputs[3] = seed2_node->name();
NodeDef *shuffle_node = graph_utils::AddNode(
"", "ShuffleDataset", shuffle_inputs, common_attrs, &graph);
(*shuffle_node->mutable_attr())[kReshuffleEachIteration].set_b(true);
NodeDef *count_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
std::vector<string> repeat_inputs(2);
repeat_inputs[0] = shuffle_node->name();
repeat_inputs[1] = count_node->name();
NodeDef *repeat_node = graph_utils::AddNode(
"", "RepeatDataset", repeat_inputs, common_attrs, &graph);
ShuffleAndRepeatFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(shuffle_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(repeat_node->name(), output));
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("ShuffleAndRepeatDataset", output));
NodeDef shuffle_and_repeat_node = output.node(
graph_utils::FindGraphNodeWithOp("ShuffleAndRepeatDataset", output));
EXPECT_EQ(shuffle_and_repeat_node.input_size(), 5);
EXPECT_EQ(shuffle_and_repeat_node.input(0), shuffle_node->input(0));
EXPECT_EQ(shuffle_and_repeat_node.input(1), shuffle_node->input(1));
EXPECT_EQ(shuffle_and_repeat_node.input(2), shuffle_node->input(2));
EXPECT_EQ(shuffle_and_repeat_node.input(3), shuffle_node->input(3));
EXPECT_EQ(shuffle_and_repeat_node.input(4), repeat_node->input(1));
for (const auto &attr :
{kOutputShapes, kOutputTypes, kReshuffleEachIteration}) {
EXPECT_TRUE(AreAttrValuesEqual(shuffle_and_repeat_node.attr().at(attr),
shuffle_node->attr().at(attr)));
}
}
TEST(ShuffleAndRepeatFusionTest, FuseShuffleV2AndRepeat) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
std::vector<std::pair<string, AttrValue>> common_attrs(2);
AttrValue shapes_attr;
SetAttrValue(kOutputShapes, &shapes_attr);
common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr);
AttrValue types_attr;
SetAttrValue(kOutputTypes, &types_attr);
common_attrs[1] = std::make_pair(kOutputTypes, types_attr);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
common_attrs, &graph);
NodeDef *buffer_size_node =
graph_utils::AddScalarConstNode<int64_t>(128, &graph);
NodeDef *seed_generator_node =
graph_utils::AddScalarConstNode<StringPiece>("dummy_resource", &graph);
std::vector<string> shuffle_inputs(3);
shuffle_inputs[0] = range_node->name();
shuffle_inputs[1] = buffer_size_node->name();
shuffle_inputs[2] = seed_generator_node->name();
NodeDef *shuffle_node = graph_utils::AddNode(
"", "ShuffleDatasetV2", shuffle_inputs, common_attrs, &graph);
NodeDef *count_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
std::vector<string> repeat_inputs(2);
repeat_inputs[0] = shuffle_node->name();
repeat_inputs[1] = count_node->name();
NodeDef *repeat_node = graph_utils::AddNode(
"", "RepeatDataset", repeat_inputs, common_attrs, &graph);
ShuffleAndRepeatFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(shuffle_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(repeat_node->name(), output));
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("ShuffleAndRepeatDatasetV2", output));
NodeDef shuffle_and_repeat_node = output.node(
graph_utils::FindGraphNodeWithOp("ShuffleAndRepeatDatasetV2", output));
EXPECT_EQ(shuffle_and_repeat_node.input_size(), 6);
EXPECT_EQ(shuffle_and_repeat_node.input(0), shuffle_node->input(0));
EXPECT_EQ(shuffle_and_repeat_node.input(1), shuffle_node->input(1));
EXPECT_EQ(shuffle_and_repeat_node.input(4), repeat_node->input(1));
EXPECT_EQ(shuffle_and_repeat_node.input(5), shuffle_node->input(2));
for (const auto &attr : {kOutputShapes, kOutputTypes}) {
EXPECT_TRUE(AreAttrValuesEqual(shuffle_and_repeat_node.attr().at(attr),
shuffle_node->attr().at(attr)));
}
EXPECT_TRUE(shuffle_and_repeat_node.attr().at(kReshuffleEachIteration).b());
}
TEST(ShuffleAndRepeatFusionTest, FuseShuffleV3AndRepeat) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
std::vector<std::pair<string, AttrValue>> common_attrs(2);
AttrValue shapes_attr;
SetAttrValue(kOutputShapes, &shapes_attr);
common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr);
AttrValue types_attr;
SetAttrValue(kOutputTypes, &types_attr);
common_attrs[1] = std::make_pair(kOutputTypes, types_attr);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
common_attrs, &graph);
NodeDef *buffer_size_node =
graph_utils::AddScalarConstNode<int64_t>(128, &graph);
NodeDef *seed_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
NodeDef *seed2_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
NodeDef *seed_generator_node =
graph_utils::AddScalarConstNode<StringPiece>("dummy_resource", &graph);
std::vector<string> shuffle_inputs(5);
shuffle_inputs[0] = range_node->name();
shuffle_inputs[1] = buffer_size_node->name();
shuffle_inputs[2] = seed_node->name();
shuffle_inputs[3] = seed2_node->name();
shuffle_inputs[4] = seed_generator_node->name();
NodeDef *shuffle_node = graph_utils::AddNode(
"", "ShuffleDatasetV3", shuffle_inputs, common_attrs, &graph);
(*shuffle_node->mutable_attr())[kReshuffleEachIteration].set_b(true);
NodeDef *count_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
std::vector<string> repeat_inputs(2);
repeat_inputs[0] = shuffle_node->name();
repeat_inputs[1] = count_node->name();
NodeDef *repeat_node = graph_utils::AddNode(
"", "RepeatDataset", repeat_inputs, common_attrs, &graph);
ShuffleAndRepeatFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(shuffle_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(repeat_node->name(), output));
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("ShuffleAndRepeatDatasetV2", output));
NodeDef shuffle_and_repeat_node = output.node(
graph_utils::FindGraphNodeWithOp("ShuffleAndRepeatDatasetV2", output));
EXPECT_EQ(shuffle_and_repeat_node.input_size(), 6);
EXPECT_EQ(shuffle_and_repeat_node.input(0), shuffle_node->input(0));
EXPECT_EQ(shuffle_and_repeat_node.input(1), shuffle_node->input(1));
EXPECT_EQ(shuffle_and_repeat_node.input(2), shuffle_node->input(2));
EXPECT_EQ(shuffle_and_repeat_node.input(3), shuffle_node->input(3));
EXPECT_EQ(shuffle_and_repeat_node.input(4), repeat_node->input(1));
EXPECT_EQ(shuffle_and_repeat_node.input(5), shuffle_node->input(4));
for (const auto &attr :
{kOutputShapes, kOutputTypes, kReshuffleEachIteration}) {
EXPECT_TRUE(AreAttrValuesEqual(shuffle_and_repeat_node.attr().at(attr),
shuffle_node->attr().at(attr)));
}
}
TEST(ShuffleAndRepeatFusionTest, NoChange) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
std::vector<std::pair<string, AttrValue>> common_attrs(2);
AttrValue shapes_attr;
SetAttrValue(kOutputShapes, &shapes_attr);
common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr);
AttrValue types_attr;
SetAttrValue(kOutputTypes, &types_attr);
common_attrs[1] = std::make_pair(kOutputTypes, types_attr);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
common_attrs, &graph);
NodeDef *count_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
std::vector<string> repeat_inputs(2);
repeat_inputs[0] = range_node->name();
repeat_inputs[1] = count_node->name();
graph_utils::AddNode("", "RepeatDataset", repeat_inputs, common_attrs,
&graph);
ShuffleAndRepeatFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::Compare(*graph.graph(), output));
}
}
}
} |
1,415 | cpp | tensorflow/tensorflow | enable_gradient_descent | tensorflow/core/grappler/optimizers/data/enable_gradient_descent.cc | tensorflow/core/grappler/optimizers/data/enable_gradient_descent_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_ENABLE_GRADIENT_DESCENT_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_ENABLE_GRADIENT_DESCENT_H_
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
constexpr char kAutotune[] = "autotune";
class EnableGradientDescent : public TFDataOptimizerBase {
public:
EnableGradientDescent() = default;
~EnableGradientDescent() override = default;
string name() const override { return "enable_gradient_descent"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
if (!config) return absl::OkStatus();
const string& autotune = config->parameter_map().at(kAutotune).s();
if (autotune == "true") {
autotune_ = true;
} else if (autotune == "false") {
autotune_ = false;
} else {
return errors::InvalidArgument("Received an invalid value for parameter ",
kAutotune, ": ", autotune);
}
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
private:
bool autotune_ = true;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/enable_gradient_descent.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kAlgorithm[] = "algorithm";
constexpr char kModelDataset[] = "ModelDataset";
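// Integer values of the ModelDataset "algorithm" attribute.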
constexpr int64_t HILL_CLIMB = 0;
constexpr int64_t GRADIENT_DESCENT = 1;
}
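// Switches the autotune algorithm on the ModelDataset node from hill climbing
// to gradient descent. A no-op when autotune is disabled or when the item is
// derived from a FunctionDef.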
Status EnableGradientDescent::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization enable_gradient_descent is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
int index = graph_utils::FindGraphNodeWithOp(kModelDataset, *output);
NodeDef& model_node = *(output->mutable_node(index));
if (model_node.attr().at(kAlgorithm).i() == HILL_CLIMB) {
(*model_node.mutable_attr())[kAlgorithm].set_i(GRADIENT_DESCENT);
stats->num_changes++;
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(EnableGradientDescent, "enable_gradient_descent");
}
} | #include "tensorflow/core/grappler/optimizers/data/enable_gradient_descent.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithEnableGradientDescent(const GrapplerItem &item,
GraphDef *output, bool autotune) {
EnableGradientDescent optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
class SimpleRewrite
: public ::testing::TestWithParam<std::tuple<bool, int64_t, string>> {};
TEST_P(SimpleRewrite, EnableGradientDescentTest) {
const bool autotune = std::get<0>(GetParam());
const int64_t algorithm_index = std::get<1>(GetParam());
const string op = std::get<2>(GetParam());
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 5}, {"dtype", DT_INT32}}),
NDef("batch", "BatchDataset", {"range", "batch_size"}, {}),
NDef("model", "ModelDataset", {"batch"},
{{"algorithm", algorithm_index}}),
NDef("Sink", op, {"model"}, {})});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithEnableGradientDescent(item, &output, autotune));
EXPECT_EQ(item.graph.node().size(), output.node().size());
NodeDef model_node =
output.node(graph_utils::FindGraphNodeWithName("model", output));
EXPECT_EQ(model_node.attr().at("algorithm").i(),
(autotune && op != "_Retval") ? 1 : algorithm_index);
}
INSTANTIATE_TEST_SUITE_P(
Test, SimpleRewrite,
::testing::Combine(::testing::Values(false, true), ::testing::Values(0, 1),
::testing::Values("Identity", "_Retval")));
}
}
} |
1,416 | cpp | tensorflow/tensorflow | batch_parallelization | tensorflow/core/grappler/optimizers/data/batch_parallelization.cc | tensorflow/core/grappler/optimizers/data/batch_parallelization_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_BATCH_PARALLELIZATION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_BATCH_PARALLELIZATION_H_
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
constexpr char kAutotune[] = "autotune";
class BatchParallelization : public TFDataOptimizerBase {
public:
BatchParallelization() = default;
~BatchParallelization() override = default;
string name() const override { return "batch_parallelization"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
if (!config) return absl::OkStatus();
const string& autotune = config->parameter_map().at(kAutotune).s();
if (autotune == "true") {
autotune_ = true;
} else if (autotune == "false") {
autotune_ = false;
} else {
return errors::InvalidArgument("Received an invalid value for parameter ",
kAutotune, ": ", autotune);
}
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
private:
bool autotune_ = true;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/batch_parallelization.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kBatchDataset[] = "BatchDatasetV2";
constexpr char kParallelBatchDataset[] = "ParallelBatchDataset";
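// Clones the BatchDatasetV2 node `name` as a ParallelBatchDataset, inserting
// an autotuned `num_parallel_calls` input in front of `drop_remainder`.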
NodeDef MakeParallelBatch(const string& name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << name
<< " in the optimized graph.";
NodeDef parallel_batch = graph->graph()->node(index);
graph_utils::SetUniqueGraphNodeName(kParallelBatchDataset, graph->graph(),
¶llel_batch);
parallel_batch.set_op(kParallelBatchDataset);
auto* num_parallel_calls =
graph_utils::AddScalarConstNode(data::model::kAutotune, graph);
string drop_remainder_name = parallel_batch.input(2);
parallel_batch.set_input(2, num_parallel_calls->name());
parallel_batch.add_input(drop_remainder_name);
return parallel_batch;
}
}
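// Replaces every BatchDatasetV2 node with an autotuned ParallelBatchDataset.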
Status BatchParallelization::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization batch_parallelization is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
auto get_batch_node = [](const NodeDef& node) -> const NodeDef* {
if (node.op() == kBatchDataset) return &node;
return nullptr;
};
for (const NodeDef& node : item.graph.node()) {
const NodeDef* batch_node = get_batch_node(node);
if (!batch_node) continue;
auto* parallel_batch =
graph.AddNode(MakeParallelBatch(batch_node->name(), &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(batch_node->name(), parallel_batch->name()));
nodes_to_delete.insert(batch_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(BatchParallelization, "batch_parallelization");
}
} | #include "tensorflow/core/grappler/optimizers/data/batch_parallelization.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithBatchParallelization(const GrapplerItem& item,
GraphDef* output, bool autotune) {
BatchParallelization optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
using graph_tests_utils::MakeBatchV2Node;
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, BatchParallelizationTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
MakeBatchV2Node("batch", "range", "batch_size", "drop_remainder",
false),
NDef("Sink", "Identity", {"batch"}, {})},
{});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithBatchParallelization(item, &output, autotune));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelBatchDataset", output),
autotune);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("batch", output), !autotune);
}
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
class FromFunctionDef : public ::testing::TestWithParam<string> {};
TEST_P(FromFunctionDef, BatchParallelizationTest) {
const string op = GetParam();
bool from_function_def = (op == "_Retval");
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
MakeBatchV2Node("batch", "range", "batch_size", "drop_remainder",
false),
NDef("Sink", op, {"batch"}, {})},
{});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithBatchParallelization(item, &output, true));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelBatchDataset", output),
!from_function_def);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("batch", output),
from_function_def);
}
INSTANTIATE_TEST_SUITE_P(Test, FromFunctionDef,
::testing::Values("Identity", "_Retval"));
class ValueRewrites : public ::testing::TestWithParam<bool> {};
TEST_P(ValueRewrites, BatchParallelizationTest) {
const bool parallel_copy = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
MakeBatchV2Node("batch", "range", "batch_size", "drop_remainder",
parallel_copy),
NDef("Sink", "Identity", {"batch"}, {})},
{});
item.fetch.push_back("Sink");
NodeDef batch =
item.graph.node(graph_utils::FindGraphNodeWithName("batch", item.graph));
EXPECT_TRUE(batch.attr().find("parallel_copy") != batch.attr().end());
GraphDef output;
TF_ASSERT_OK(OptimizeWithBatchParallelization(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("ParallelBatchDataset", output));
NodeDef parallel_batch = output.node(
graph_utils::FindGraphNodeWithOp("ParallelBatchDataset", output));
EXPECT_EQ(parallel_batch.input_size(), 4);
EXPECT_EQ(parallel_batch.input(0), "range");
EXPECT_EQ(parallel_batch.input(1), "batch_size");
EXPECT_EQ(parallel_batch.input(3), "drop_remainder");
EXPECT_EQ(parallel_batch.attr().at("parallel_copy").b(), parallel_copy);
NodeDef parallelism_val = output.node(
graph_utils::FindGraphNodeWithName(parallel_batch.input(2), output));
EXPECT_EQ(parallelism_val.attr().at("value").tensor().int64_val(0), -1);
}
INSTANTIATE_TEST_SUITE_P(Test, ValueRewrites, ::testing::Values(false, true));
}
}
} |
1,417 | cpp | tensorflow/tensorflow | slack | tensorflow/core/grappler/optimizers/data/slack.cc | tensorflow/core/grappler/optimizers/data/slack_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_SLACK_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_SLACK_H_
#include "absl/strings/numbers.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
class Slack : public TFDataOptimizerBase {
public:
Slack() = default;
~Slack() override = default;
string name() const override { return "slack"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
if (!config) return errors::InvalidArgument("Config parameter required.");
const string& slack_period_param =
config->parameter_map().at("slack_period").s();
if (!absl::SimpleAtoi(slack_period_param, &slack_period_)) {
return errors::InvalidArgument("Invalid `slack_period` parameter: ",
slack_period_param);
}
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
private:
int64_t slack_period_ = -1;
Status RecursivelyHandleOp(const MutableGraphView& graph,
NodeDef* dataset_node);
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/slack.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kPrefetchDatasetOp[] = "PrefetchDataset";
template <std::size_t SIZE>
bool IsDatasetNodeOfType(const NodeDef& node,
const std::array<const char*, SIZE>& arr) {
for (const auto& dataset_op_name : arr) {
if (node.op() == dataset_op_name) return true;
}
return false;
}
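// Dataset ops with more than one input pipeline; slack is introduced into
// every input.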
constexpr std::array<const char*, 2> kMultipleInputsDatasetOps = {
"ZipDataset", "ConcatenateDataset"};
constexpr std::array<const char*, 22> kPassThroughOps = {
"CacheDataset",
"CacheDatasetV2",
"ExperimentalMaxIntraOpParallelismDataset",
"ExperimentalPrivateThreadPoolDataset",
"FilterDataset",
"Identity",
"MapDataset",
"MaxIntraOpParallelismDataset",
"ModelDataset",
"OptimizeDataset",
"ParallelMapDataset",
"PrivateThreadPoolDataset",
"ReduceDataset",
"RepeatDataset",
"ShardDataset",
"ShuffleAndRepeatDataset",
"ShuffleDataset",
"ShuffleDatasetV2",
"ShuffleDatasetV3",
"SkipDataset",
"TakeDataset",
"WindowDataset",
};
}
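// Walks the pipeline from the fetch node towards its sources and sets the
// `slack_period` attribute on the terminal PrefetchDataset, recursing through
// pass-through ops and into every input of multi-input dataset ops.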
Status Slack::RecursivelyHandleOp(const MutableGraphView& graph,
NodeDef* dataset_node) {
if (dataset_node->op() == kPrefetchDatasetOp) {
if (HasNodeAttr(*dataset_node, "slack_period")) {
(*dataset_node->mutable_attr())["slack_period"].set_i(slack_period_);
} else {
AddNodeAttr("slack_period", slack_period_, dataset_node);
}
return absl::OkStatus();
}
if (IsDatasetNodeOfType(*dataset_node, kPassThroughOps)) {
NodeDef* input_node = graph_utils::GetInputNode(*dataset_node, graph, 0);
return RecursivelyHandleOp(graph, input_node);
}
if (IsDatasetNodeOfType(*dataset_node, kMultipleInputsDatasetOps)) {
for (int i = 0; i < dataset_node->input_size(); ++i) {
NodeDef* input_node = graph_utils::GetInputNode(*dataset_node, graph, i);
TF_RETURN_IF_ERROR(RecursivelyHandleOp(graph, input_node));
}
return absl::OkStatus();
}
LOG(WARNING) << "Could not find a final `prefetch` in the input pipeline to "
"which to introduce slack.";
return absl::OkStatus();
}
Status Slack::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
if (slack_period_ < 1)
return errors::InvalidArgument("Invalid `slack_period` parameter: ",
slack_period_);
*output = item.graph;
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
if (item.fetch.size() != 1) {
return errors::InvalidArgument(
"Expected only one fetch node but there were ", item.fetch.size(), ": ",
absl::StrJoin(item.fetch, ", "));
}
NodeDef* dataset_node = graph.GetNode(item.fetch.at(0));
return RecursivelyHandleOp(graph, dataset_node);
}
REGISTER_GRAPH_OPTIMIZER_AS(Slack, "slack");
}
} | #include "tensorflow/core/grappler/optimizers/data/slack.h"
#include "absl/status/status.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
void SetupGrapplerItem(GrapplerItem *item) {
MutableGraphView graph(&item->graph);
std::vector<std::pair<string, AttrValue>> common_attrs(2);
AttrValue shapes_attr;
SetAttrValue(std::vector<TensorShape>({{}}), &shapes_attr);
common_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue(std::vector<DataType>({DT_INT64}), &types_attr);
common_attrs[1] = std::make_pair("output_types", types_attr);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
NodeDef *range_node = graph_utils::AddNode(
"RangeDataset", "RangeDataset", range_inputs, common_attrs, &graph);
NodeDef *buffer_size_node =
graph_utils::AddScalarConstNode<int64_t>(1, &graph);
NodeDef *prefetch_node = graph_utils::AddNode(
"PrefetchDataset", "PrefetchDataset",
{range_node->name(), buffer_size_node->name()}, common_attrs, &graph);
item->fetch.push_back(prefetch_node->name());
}
struct ParameterizedSlackTest
: ::testing::TestWithParam<std::tuple<string, int>> {};
TEST_P(ParameterizedSlackTest, BasicTest) {
GrapplerItem item;
SetupGrapplerItem(&item);
Slack optimizer;
tensorflow::RewriterConfig_CustomGraphOptimizer config;
(*config.mutable_parameter_map())["slack_period"].set_s(
std::get<0>(GetParam()));
TF_ASSERT_OK(optimizer.Init(&config));
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_TRUE(graph_utils::ContainsNodeWithOp("PrefetchDataset", output));
NodeDef optimized_prefetch_node =
output.node(graph_utils::FindGraphNodeWithOp("PrefetchDataset", output));
EXPECT_EQ(optimized_prefetch_node.attr().at("slack_period").i(),
std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(DifferentSlackEveryValues, ParameterizedSlackTest,
::testing::Values(std::make_tuple("1", 1),
std::make_tuple("8", 8)));
TEST(SlackTest, TestFailWithoutInit) {
GrapplerItem item;
Slack optimizer;
GraphDef output;
Status result = optimizer.Optimize(nullptr, item, &output);
EXPECT_FALSE(result.ok());
EXPECT_TRUE(absl::IsInvalidArgument(result));
}
TEST(SlackTest, TestFailWithInvalidSlackEveryParam) {
GrapplerItem item;
SetupGrapplerItem(&item);
Slack optimizer;
tensorflow::RewriterConfig_CustomGraphOptimizer config;
(*config.mutable_parameter_map())["slack_period"].set_s("0");
TF_ASSERT_OK(optimizer.Init(&config));
GraphDef output;
Status result = optimizer.Optimize(nullptr, item, &output);
EXPECT_FALSE(result.ok());
EXPECT_TRUE(absl::IsInvalidArgument(result));
}
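// Pipelines defined inside a FunctionDef must not be rewritten, so the
// prefetch node inside the function keeps the default slack_period of 0.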
TEST(SlackTest, TestFunctionNotOptimized) {
GrapplerFunctionItem item;
FunctionDefLibrary lib_def;
FunctionDef *fdef = lib_def.add_function();
fdef->mutable_signature()->set_name("nested_function");
auto *input_arg = fdef->mutable_signature()->add_input_arg();
input_arg->set_name("args_0");
input_arg->set_type(DT_INT64);
auto *output_arg = fdef->mutable_signature()->add_output_arg();
output_arg->set_name("identity");
output_arg->set_type(DT_VARIANT);
fdef->mutable_signature()->set_is_stateful(true);
AttrValue shapes_attr;
SetAttrValue(std::vector<TensorShape>({{}}), &shapes_attr);
AttrValue types_attr;
SetAttrValue(std::vector<DataType>({DT_INT64}), &types_attr);
NodeDef *tensor_dataset_node =
function_utils::AddNode("TensorDataset", "TensorDataset", {"args_0"},
{std::make_pair("output_shapes", shapes_attr),
std::make_pair("Toutput_types", types_attr)},
fdef);
NodeDef *prefetch_node = function_utils::AddNode(
"PrefetchDataset", "PrefetchDataset",
{strings::StrCat(tensor_dataset_node->name(), ":handle:0"), "args_0"},
{std::make_pair("output_shapes", shapes_attr),
std::make_pair("output_types", types_attr)},
fdef);
AttrValue variant_type_attr;
SetAttrValue(DT_VARIANT, &variant_type_attr);
NodeDef *identity_node = function_utils::AddNode(
"Identity", "Identity",
{strings::StrCat(prefetch_node->name(), ":handle:0"),
strings::StrCat("^", tensor_dataset_node->name())},
{std::make_pair("T", variant_type_attr)}, fdef);
(*fdef->mutable_ret())["identity"] =
strings::StrCat(identity_node->name(), ":output:0");
(*fdef->mutable_control_ret())[tensor_dataset_node->name()] =
tensor_dataset_node->name();
fdef->mutable_signature()->add_control_output(tensor_dataset_node->name());
FunctionLibraryDefinition flib(OpRegistry::Global(), lib_def);
TF_ASSERT_OK(
MakeGrapplerFunctionItem(*fdef, flib, 27, &item));
GraphDef output;
Slack optimizer;
tensorflow::RewriterConfig_CustomGraphOptimizer config;
(*config.mutable_parameter_map())["slack_period"].set_s("8");
TF_ASSERT_OK(optimizer.Init(&config));
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_TRUE(graph_utils::ContainsNodeWithOp("PrefetchDataset", output));
NodeDef optimized_prefetch_node =
output.node(graph_utils::FindGraphNodeWithOp("PrefetchDataset", output));
EXPECT_EQ(optimized_prefetch_node.attr().at("slack_period").i(), 0);
}
}
}
} |
1,418 | cpp | tensorflow/tensorflow | map_fusion | tensorflow/core/grappler/optimizers/data/map_fusion.cc | tensorflow/core/grappler/optimizers/data/map_fusion_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAP_FUSION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_MAP_FUSION_H_
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
constexpr char kAutotune[] = "autotune";
class MapFusion : public TFDataOptimizerBase {
public:
MapFusion() = default;
~MapFusion() override = default;
string name() const override { return "map_fusion"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
if (!config) return absl::OkStatus();
const string& autotune = config->parameter_map().at(kAutotune).s();
if (autotune == "true") {
autotune_ = true;
} else if (autotune == "false") {
autotune_ = false;
} else {
return errors::InvalidArgument("Received an invalid value for parameter ",
kAutotune, ": ", autotune);
}
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
private:
bool autotune_ = true;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/map_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kMapDatasetOp[] = "MapDataset";
constexpr char kParallelMapDatasetOp[] = "ParallelMapDatasetV2";
constexpr char kDeterministicAttr[] = "deterministic";
constexpr char kConstOp[] = "Const";
constexpr char kValueAttr[] = "value";
constexpr int kAutotuneValue = -1;
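// Returns true if `node_name` is a Const node holding the autotune sentinel
// value (-1), i.e. the corresponding `num_parallel_calls` is autotuned.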
bool IsAutotuneNode(const string& node_name, const MutableGraphView& graph) {
const NodeDef* node = graph.GetNode(node_name);
if (!node) return false;
if (node->op() != kConstOp) return false;
const auto* value = gtl::FindOrNull(node->attr(), kValueAttr);
if (!value) return false;
if (value->has_tensor()) {
if (value->tensor().int64_val_size()) {
return value->tensor().int64_val(0) == kAutotuneValue;
}
}
return false;
}
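// Parallel maps can only be fused if their effective `deterministic` settings
// (treating an unset attribute as deterministic) agree.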
bool SameDeterministicAttr(const NodeDef& parallel_map_node,
const NodeDef& parent_parallel_map_node) {
const auto* first_deterministic_attr =
gtl::FindOrNull(parallel_map_node.attr(), kDeterministicAttr);
const auto* second_deterministic_attr =
gtl::FindOrNull(parent_parallel_map_node.attr(), kDeterministicAttr);
const bool first_deterministic_val =
(first_deterministic_attr == nullptr) ||
(first_deterministic_attr->s() == "true" ||
first_deterministic_attr->s() == "default");
const bool second_deterministic_val =
(second_deterministic_attr == nullptr) ||
(second_deterministic_attr->s() == "true" ||
second_deterministic_attr->s() == "default");
return first_deterministic_val == second_deterministic_val;
}
string GetFusedName(const NodeDef& parent, const NodeDef& child) {
return absl::StrCat("map_fusion_nodes/", parent.name(), "/", child.name());
}
string GetFusedName(const FunctionDef& parent, const FunctionDef& child) {
return absl::StrCat("map_fusion_funcs/", parent.signature().name(), "/",
child.signature().name());
}
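// Builds the fused map node: it reuses the parent's input(s), points `f` at
// the fused function, and merges the parallelism, cardinality, and metadata
// attributes of the two original map nodes.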
NodeDef MakeFusedNode(const NodeDef& parent_map_node, const NodeDef& map_node,
const FunctionDef& fused_function,
MutableGraphView* graph) {
NodeDef fused_node;
graph_utils::SetUniqueGraphNodeName(GetFusedName(parent_map_node, map_node),
graph->graph(), &fused_node);
if (map_node.op() == kMapDatasetOp) {
fused_node.set_op(kMapDatasetOp);
fused_node.add_input(parent_map_node.input(0));
} else if (map_node.op() == kParallelMapDatasetOp) {
fused_node.set_op(kParallelMapDatasetOp);
fused_node.add_input(parent_map_node.input(0));
fused_node.add_input(parent_map_node.input(1));
}
auto attr = parent_map_node.attr().at("f");
*attr.mutable_func()->mutable_name() = fused_function.signature().name();
(*fused_node.mutable_attr())["f"] = std::move(attr);
graph_utils::CopyAttribute("Targuments", parent_map_node, &fused_node);
graph_utils::CopyShapesAndTypesAttrs(map_node, &fused_node);
auto value_or_false = [](const AttrValue* attr) {
if (!attr) return false;
return attr->b();
};
const auto* first_parallelism =
gtl::FindOrNull(parent_map_node.attr(), "use_inter_op_parallelism");
const auto* second_parallelism =
gtl::FindOrNull(map_node.attr(), "use_inter_op_parallelism");
(*fused_node.mutable_attr())["use_inter_op_parallelism"].set_b(
value_or_false(first_parallelism) || value_or_false(second_parallelism));
const auto* first_cardinality =
gtl::FindOrNull(parent_map_node.attr(), "preserve_cardinality");
const auto* second_cardinality =
gtl::FindOrNull(map_node.attr(), "preserve_cardinality");
(*fused_node.mutable_attr())["preserve_cardinality"].set_b(
value_or_false(first_cardinality) && value_or_false(second_cardinality));
graph_utils::MaybeSetFusedMetadata(parent_map_node, map_node, &fused_node);
if (map_node.op() == kParallelMapDatasetOp) {
graph_utils::CopyAttribute(kDeterministicAttr, map_node, &fused_node);
}
return fused_node;
}
}
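// Fuses chains of MapDataset nodes (or autotuned ParallelMapDatasetV2 nodes
// with matching determinism) by composing their map functions into one.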
Status MapFusion::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
GraphDef sorted_old_graph = item.graph;
TF_RETURN_IF_ERROR(TopologicalSort(&sorted_old_graph));
*output = sorted_old_graph;
if (!autotune_) {
VLOG(1) << "The optimization map_fusion is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
auto get_map_node = [&graph](const NodeDef& node) -> const NodeDef* {
if (node.op() == kMapDatasetOp && node.input_size() == 1) return &node;
if (node.op() == kParallelMapDatasetOp) {
if (node.input_size() != 2) return nullptr;
if (!IsAutotuneNode(node.input(1), graph)) return nullptr;
return &node;
}
return nullptr;
};
auto make_fused_function = [&function_library, &output](
const NodeDef* parent_map_node,
const NodeDef* map_node) -> FunctionDef* {
const auto& parent_fun = parent_map_node->attr().at("f");
const FunctionDef* parent_func =
function_library.Find(parent_fun.func().name());
const auto& fun = map_node->attr().at("f");
const FunctionDef* func = function_library.Find(fun.func().name());
if (!fusion_utils::CanCompose(parent_func->signature(),
func->signature())) {
VLOG(1) << "Can't fuse two maps because the output signature of the "
"first map function does not match the input signature of the "
"second function\n";
return nullptr;
}
return fusion_utils::FuseFunctions(
*parent_func, *func, GetFusedName(*parent_func, *func),
fusion_utils::ComposeSignature, fusion_utils::ComposeInput,
fusion_utils::ComposeOutput, fusion_utils::MergeNodes,
output->mutable_library());
};
for (const NodeDef& node : sorted_old_graph.node()) {
const NodeDef* map_node = get_map_node(node);
if (!map_node) continue;
const NodeDef* parent_map_node =
get_map_node(*graph_utils::GetInputNode(*map_node, graph));
if (!parent_map_node) continue;
if (parent_map_node->op() != map_node->op()) continue;
if (map_node->op() == kParallelMapDatasetOp) {
if (!SameDeterministicAttr(*parent_map_node, *map_node)) continue;
}
const auto* fused_function = make_fused_function(parent_map_node, map_node);
if (fused_function == nullptr) continue;
const auto* fused_maps_node = graph.AddNode(
MakeFusedNode(*parent_map_node, *map_node, *fused_function, &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(map_node->name(), fused_maps_node->name()));
TF_RETURN_IF_ERROR(function_library.AddFunctionDef(*fused_function));
nodes_to_delete.insert(parent_map_node->name());
nodes_to_delete.insert(map_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MapFusion, "map_fusion");
}
} | #include "tensorflow/core/grappler/optimizers/data/map_fusion.h"
#include <functional>
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace grappler {
namespace {
using graph_tests_utils::MakeMapNode;
using graph_tests_utils::MakeParallelMapV2Node;
constexpr char kConstOpName[] = "Const";
NodeDef CreateScalarConstNodeHelper(
const std::string& node_name, DataType dtype,
const std::function<void(TensorProto*)>& add_value) {
NodeDef node;
node.set_op(kConstOpName);
node.set_name(node_name);
(*node.mutable_attr())["dtype"].set_type(dtype);
auto tensor = std::make_unique<tensorflow::TensorProto>();
auto tensor_shape = std::make_unique<tensorflow::TensorShapeProto>();
tensor->set_allocated_tensor_shape(tensor_shape.release());
tensor->set_dtype(dtype);
add_value(tensor.get());
(*node.mutable_attr())["value"].set_allocated_tensor(tensor.release());
return node;
}
Status OptimizeWithMapFusion(const GrapplerItem& item, GraphDef* output,
bool autotune) {
MapFusion optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, MapFusionTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
NodeDef num_parallel_calls_node = CreateScalarConstNodeHelper(
"num_parallel_calls", DT_INT64,
[](TensorProto* proto) { proto->add_int64_val(-1); });
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
num_parallel_calls_node,
MakeParallelMapV2Node("map1", "range", num_parallel_calls_node.name(),
"XTimesTwo", "default"),
MakeParallelMapV2Node("map2", "map1", num_parallel_calls_node.name(),
"XTimesTwo", "default")},
{
test::function::XTimesTwo(),
});
MapFusion optimizer;
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapFusion(item, &output, autotune));
if (autotune) {
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map2", output));
} else {
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map2", output));
}
}
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
TEST(MapFusionTest, FuseTwoMapNodesIntoOne) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map1", "range"), MakeMapNode("map2", "map1")},
{
test::function::XTimesTwo(),
});
MapFusion optimizer;
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapFusion(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapDataset", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map2", output));
}
TEST(MapFusionTest, FuseThreeNodesIntoOne) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map1", "range"), MakeMapNode("map2", "map1"),
MakeMapNode("map3", "map2"),
NDef("cache", "CacheDataset", {"map3", "filename"}, {})},
{
test::function::XTimesTwo(),
});
MapFusion optimizer;
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapFusion(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapDataset", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map2", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map3", output));
}
TEST(MapFusionTest, FuseTwoParallelMapNodesIntoOne) {
using test::function::NDef;
GrapplerItem item;
NodeDef num_parallel_calls_node = CreateScalarConstNodeHelper(
"num_parallel_calls", DT_INT64,
[](TensorProto* proto) { proto->add_int64_val(-1); });
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
num_parallel_calls_node,
MakeParallelMapV2Node("map1", "range", num_parallel_calls_node.name(),
"XTimesTwo", "default"),
MakeParallelMapV2Node("map2", "map1", num_parallel_calls_node.name(),
"XTimesTwo", "default")},
{
test::function::XTimesTwo(),
});
MapFusion optimizer;
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapFusion(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map2", output));
}
TEST(MapFusionTest, FusedNodesAndFunctionsAreNamedAfterOldNodesAndFunctions) {
using test::function::NDef;
NodeDef num_parallel_calls_node = CreateScalarConstNodeHelper(
"num_parallel_calls", DT_INT64,
[](TensorProto* proto) { proto->add_int64_val(-1); });
auto graph = [&num_parallel_calls_node](
const std::string& parent_map_node_name,
const std::string& map_node_name,
const std::string& parent_function_name,
const std::string& function_name) {
FunctionDef parent_fn = test::function::XTimesTwo();
FunctionDef fn = test::function::XTimesTwo();
parent_fn.mutable_signature()->set_name(parent_function_name);
fn.mutable_signature()->set_name(function_name);
return test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
num_parallel_calls_node,
MakeParallelMapV2Node(parent_map_node_name, "range",
num_parallel_calls_node.name(),
parent_function_name, "default"),
MakeParallelMapV2Node(map_node_name, parent_map_node_name,
num_parallel_calls_node.name(), function_name,
"default")},
{parent_fn, fn});
};
GrapplerItem item_1;
item_1.graph = graph("map1", "map2", "fnA", "fnB");
GraphDef output_1;
TF_ASSERT_OK(OptimizeWithMapFusion(item_1, &output_1, true));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName(
"map_fusion_nodes/map1/map2", output_1));
EXPECT_TRUE(graph_utils::ContainsGraphFunctionWithName(
"map_fusion_funcs/fnA/fnB", output_1.library()));
GrapplerItem item_2;
item_2.graph = graph("map3", "map4", "fnC", "fnD");
GraphDef output_2;
TF_ASSERT_OK(OptimizeWithMapFusion(item_2, &output_2, true));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName(
"map_fusion_nodes/map3/map4", output_2));
EXPECT_TRUE(graph_utils::ContainsGraphFunctionWithName(
"map_fusion_funcs/fnC/fnD", output_2.library()));
}
}
}
} |
1,419 | cpp | tensorflow/tensorflow | inject_io_prefetch | tensorflow/core/grappler/optimizers/data/inject_io_prefetch.cc | tensorflow/core/grappler/optimizers/data/inject_io_prefetch_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_INJECT_IO_PREFETCH_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_INJECT_IO_PREFETCH_H_
#include <string>
#include "absl/status/status.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
class InjectIoPrefetch : public TFDataOptimizerBase {
public:
InjectIoPrefetch() = default;
~InjectIoPrefetch() override = default;
std::string name() const override { return "inject_io_prefetch"; };
bool UsesFunctionLibrary() const override { return false; }
absl::Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override;
absl::Status OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
protected:
bool autotune_ = true;
};
class InjectIoPrefetchEligible : public InjectIoPrefetch {
public:
std::string name() const override { return "inject_io_prefetch_eligible"; };
absl::Status OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/inject_io_prefetch.h"
#include <array>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kAutotune[] = "autotune";
constexpr char kFunctionAttrKey[] = "f";
constexpr char kParallelInterleave[] = "ParallelInterleaveDataset";
constexpr char kParallelMap[] = "ParallelMapDataset";
constexpr char kPrefetch[] = "PrefetchDataset";
constexpr std::array<const char*, 5> kAsync = {
"MapAndBatchDataset", "ParallelBatchDataset", "ParallelInterleaveDataset",
"ParallelMapDataset", "PrefetchDataset"};
constexpr std::array<const char*, 6> kIo = {
"ArrayRecordDataset", "FixedLengthRecordDataset", "RecordIODataset",
"SSTableDataset", "TextLineDataset", "TFRecordDataset"};
bool IsAsync(const NodeDef* node) {
if (!node) {
return false;
}
return absl::c_any_of(kAsync, [&](const char* dataset) {
return data::MatchesAnyVersion(dataset, node->op());
});
}
bool IsIo(const NodeDef* node) {
if (!node) {
return false;
}
return absl::c_any_of(kIo, [&](const char* dataset) {
return data::MatchesAnyVersion(dataset, node->op());
});
}
bool IsIo(const FunctionDef& function) {
for (const auto& node : function.node_def()) {
if (IsIo(&node)) {
return true;
}
}
return false;
}
bool IsIoFunction(const std::string& function_name,
const MutableGraphView& graph) {
for (const auto& function : graph.graph()->library().function()) {
if (function.signature().name() == function_name) {
return IsIo(function);
}
}
return false;
}
bool HasIoFunction(const NodeDef* node, const MutableGraphView& graph) {
if (auto it = node->attr().find(kFunctionAttrKey); it != node->attr().end()) {
return IsIoFunction(it->second.func().name(), graph);
}
return false;
}
bool IsParallelInterleaveWithIo(const NodeDef* node,
const MutableGraphView& graph) {
if (!node || !data::MatchesAnyVersion(kParallelInterleave, node->op())) {
return false;
}
return HasIoFunction(node, graph);
}
bool IsParallelMap(const NodeDef* node) {
if (!node) {
return false;
}
return data::MatchesAnyVersion(kParallelMap, node->op());
}
bool IsPrefetch(const NodeDef* node) {
if (!node) {
return false;
}
return node->op() == kPrefetch;
}
struct Edge {
NodeDef* input;
NodeDef* output;
template <typename H>
friend H AbslHashValue(H h, const Edge& e) {
return H::combine(std::move(h), e.input, e.output);
}
friend bool operator==(const Edge& lhs, const Edge& rhs) {
return lhs.input == rhs.input && lhs.output == rhs.output;
}
};
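// Injects an autotuned PrefetchDataset after `edge.input` and rewires the
// fanouts of `edge.input` onto the new node. Returns false if the output
// shapes and types could not be copied.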
absl::StatusOr<bool> InjectPrefetch(const Edge& edge, MutableGraphView& graph) {
NodeDef prefetch;
graph_utils::SetUniqueGraphNodeName(
absl::StrCat("inject/io_prefetch", edge.input->name()), graph.graph(),
&prefetch);
prefetch.set_op(kPrefetch);
*prefetch.mutable_input()->Add() = edge.input->name();
NodeDef* autotune_value =
graph_utils::AddScalarConstNode(data::model::kAutotune, &graph);
*prefetch.mutable_input()->Add() = autotune_value->name();
if (!graph_utils::CopyShapesAndTypesAttrs(*edge.input, &prefetch)) {
return false;
}
TF_RETURN_IF_ERROR(graph_utils::SetMetadataName(prefetch.name(), &prefetch));
NodeDef* added_prefetch = graph.AddNode(std::move(prefetch));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(edge.input->name(), added_prefetch->name()));
return true;
}
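// Recursively walks the pipeline from the sink towards its sources and
// records the edges after which a prefetch should be injected: after a
// ParallelMap fed by an I/O source with no prefetch already in between, and
// after a ParallelInterleave whose map function performs I/O.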
void GetPrefetchInjectionEdges(
const MutableGraphView& graph, NodeDef* node, NodeDef* output,
NodeDef* output_output, NodeDef* last_async, NodeDef* last_async_output,
NodeDef* last_last_async,
absl::flat_hash_set<Edge>& prefetch_injection_edges) {
if (!node) {
return;
}
if (IsAsync(output)) {
last_last_async = last_async;
last_async_output = output_output;
last_async = output;
}
if (IsIo(node)) {
if (IsParallelMap(last_async) && !IsPrefetch(last_last_async)) {
prefetch_injection_edges.insert({last_async, last_async_output});
}
return;
}
if (IsParallelInterleaveWithIo(node, graph)) {
if (!IsPrefetch(last_async)) {
prefetch_injection_edges.insert({node, output});
}
return;
}
for (int64_t i = 0; i < node->input_size(); ++i) {
NodeDef* input = graph_utils::GetInputNode(*node, graph, i);
GetPrefetchInjectionEdges(graph, input, node,
output, last_async,
last_async_output, last_last_async,
prefetch_injection_edges);
}
}
absl::StatusOr<absl::flat_hash_set<Edge>> GetPrefetchInjectionEdges(
const GrapplerItem& item, const MutableGraphView& graph) {
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph)) {
return absl::flat_hash_set<Edge>();
}
if (item.fetch.size() != 1) {
return absl::InvalidArgumentError(
absl::StrCat("Expected only one fetch node but there were ",
item.fetch.size(), ": ", absl::StrJoin(item.fetch, ", ")));
}
NodeDef* sink_node = graph.GetNode(item.fetch.at(0));
NodeDef* last_node = graph_utils::GetInputNode(*sink_node, graph);
absl::flat_hash_set<Edge> prefetch_injection_edges;
GetPrefetchInjectionEdges(
graph, last_node, sink_node,
nullptr,
nullptr, nullptr,
nullptr, prefetch_injection_edges);
return prefetch_injection_edges;
}
}
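// The "eligible" variant only counts the edges that would receive a prefetch;
// it never modifies the graph.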
absl::Status InjectIoPrefetchEligible::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
return absl::OkStatus();
}
MutableGraphView graph(output);
TF_ASSIGN_OR_RETURN(absl::flat_hash_set<Edge> prefetch_injection_edges,
GetPrefetchInjectionEdges(item, graph));
stats->num_changes += prefetch_injection_edges.size();
return absl::OkStatus();
}
absl::Status InjectIoPrefetch::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
return absl::OkStatus();
}
MutableGraphView graph(output);
TF_ASSIGN_OR_RETURN(absl::flat_hash_set<Edge> prefetch_injection_edges,
GetPrefetchInjectionEdges(item, graph));
for (const auto& edge : prefetch_injection_edges) {
TF_ASSIGN_OR_RETURN(bool success, InjectPrefetch(edge, graph));
stats->num_changes += success;
}
return absl::OkStatus();
}
absl::Status InjectIoPrefetch::Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) {
if (!config) {
return absl::OkStatus();
}
const std::string& autotune = config->parameter_map().at(kAutotune).s();
if (autotune == "true") {
autotune_ = true;
} else if (autotune == "false") {
autotune_ = false;
} else {
return absl::InvalidArgumentError(absl::StrCat(
"Received an invalid value for parameter ", kAutotune, ": ", autotune));
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(InjectIoPrefetch, "inject_io_prefetch");
}
} | #include "tensorflow/core/grappler/optimizers/data/inject_io_prefetch.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::GDef;
using test::function::NDef;
FunctionDef InterleaveIoFunction(const std::string& name) {
return FunctionDefHelper::Create(
name,
{"args_0: int64"},
{"identity: variant"},
{},
{
{{"key_prefix"}, "Const", {}, {{"dtype", DT_STRING}}},
{{"start_key"}, "Const", {}, {{"dtype", DT_STRING}}},
{{"stop_key"}, "Const", {}, {{"dtype", DT_STRING}}},
{{"SSTableDataset"},
"SSTableDataset",
{"args_0", "key_prefix:output:0", "start_key:output:0",
"stop_key:output:0"},
{}},
},
{});
}
GraphDef EligibleInterleaveCase() {
return GDef(
{NDef("files_string_1", "Const", {},
{{"value", "file1file2"}, {"dtype", DT_STRING}}),
NDef("files_tensor_1", "TensorSliceDataset", {"files_1_string"},
{{"is_files", true}}),
NDef("cycle_length_1", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length_1", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls_1", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV4Node(
"interleave_1", "files_tensor_1", "cycle_length_1", "block_length_1",
"num_parallel_calls_1", "io_1", "default"),
NDef("files_string_2", "Const", {},
{{"value", "file1file2"}, {"dtype", DT_STRING}}),
NDef("files_tensor_2", "TensorSliceDataset", {"files_2_string"},
{{"is_files", true}}),
NDef("cycle_length_2", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length_2", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls_2", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV4Node(
"interleave_2", "files_tensor_2", "cycle_length_2", "block_length_2",
"num_parallel_calls_2", "io_2", "default"),
NDef("zip", "ZipDataset", {"interleave_1", "interleave_2"}, {}),
NDef("Sink", "Identity", {"zip"}, {})},
{InterleaveIoFunction("io_1"), InterleaveIoFunction("io_2")});
}
GraphDef EligibleMapCase() {
return GDef(
{NDef("files_1", "Const", {},
{{"value", "file1file2"}, {"dtype", DT_STRING}}),
NDef("key_prefix_1", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}),
NDef("start_key_1", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}),
NDef("stop_key_1", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}),
NDef("io_1", "SSTableDataset",
{"files_1", "key_prefix_1", "start_key_1", "stop_key_1"}, {}),
NDef("num_parallel_calls_1", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapV2Node(
"map_1", "io_1", "num_parallel_calls_1", "noop_1",
"default"),
NDef("files_2", "Const", {},
{{"value", "file1file2"}, {"dtype", DT_STRING}}),
NDef("key_prefix_2", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}),
NDef("start_key_2", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}),
NDef("stop_key_2", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}),
NDef("io_2", "SSTableDataset",
{"files_2", "key_prefix_2", "start_key_2", "stop_key_2"}, {}),
NDef("num_parallel_calls_2", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapV2Node(
"map_2", "io_2", "num_parallel_calls_2", "noop_2",
"default"),
NDef("zip", "ZipDataset", {"map_1", "map_2"}, {}),
NDef("Sink", "Identity", {"zip"}, {})},
{});
}
TEST(InjectIoPrefetchEligible, EligibleInterleaveCaseHasNoInjection) {
GrapplerItem item;
item.graph = EligibleInterleaveCase();
item.fetch.push_back("Sink");
InjectIoPrefetchEligible optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
NodeDef zip_node =
output.node(graph_utils::FindGraphNodeWithName("zip", output));
for (const auto& input_node_name : zip_node.input()) {
NodeDef input_node = output.node(
graph_utils::FindGraphNodeWithName(input_node_name, output));
EXPECT_NE(input_node.op(), "PrefetchDataset");
}
EXPECT_EQ(item.graph.DebugString(), output.DebugString());
}
TEST(InjectIoPrefetchEligible, EligibleMapCaseHasNoInjection) {
GrapplerItem item;
item.graph = EligibleMapCase();
item.fetch.push_back("Sink");
InjectIoPrefetchEligible optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
NodeDef zip_node =
output.node(graph_utils::FindGraphNodeWithName("zip", output));
for (const auto& input_node_name : zip_node.input()) {
NodeDef input_node = output.node(
graph_utils::FindGraphNodeWithName(input_node_name, output));
EXPECT_NE(input_node.op(), "PrefetchDataset");
}
EXPECT_EQ(item.graph.DebugString(), output.DebugString());
}
TEST(InjectIoPrefetch, InterleaveCaseHasInjection) {
GrapplerItem item;
item.graph = EligibleInterleaveCase();
item.fetch.push_back("Sink");
InjectIoPrefetch optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
NodeDef zip_node =
output.node(graph_utils::FindGraphNodeWithName("zip", output));
for (const auto& input_node_name : zip_node.input()) {
NodeDef input_node = output.node(
graph_utils::FindGraphNodeWithName(input_node_name, output));
EXPECT_EQ(input_node.op(), "PrefetchDataset");
}
}
TEST(InjectIoPrefetch, MapCaseHasInjection) {
GrapplerItem item;
item.graph = EligibleMapCase();
item.fetch.push_back("Sink");
InjectIoPrefetch optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
NodeDef zip_node =
output.node(graph_utils::FindGraphNodeWithName("zip", output));
for (const auto& input_node_name : zip_node.input()) {
NodeDef input_node = output.node(
graph_utils::FindGraphNodeWithName(input_node_name, output));
EXPECT_EQ(input_node.op(), "PrefetchDataset");
}
}
}
}
} |
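A minimal illustrative sketch (not taken from the repository tests) of exercising the "autotune" parameter validation shown at the top of this row; it assumes only that InjectIoPrefetch follows the usual tf.data optimizer Init(config) contract used throughout this set, and the value "maybe" is an invented example:
// Hedged sketch: a well-formed and a malformed "autotune" value; the malformed
// one should hit the InvalidArgumentError branch shown above.
InjectIoPrefetch optimizer;
RewriterConfig_CustomGraphOptimizer config;
(*config.mutable_parameter_map())["autotune"].set_s("true");
TF_EXPECT_OK(optimizer.Init(&config));
(*config.mutable_parameter_map())["autotune"].set_s("maybe");  // neither "true" nor "false"
EXPECT_FALSE(optimizer.Init(&config).ok());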
1,420 | cpp | tensorflow/tensorflow | seq_interleave_prefetch | tensorflow/core/grappler/optimizers/data/seq_interleave_prefetch.cc | tensorflow/core/grappler/optimizers/data/seq_interleave_prefetch_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_SEQ_INTERLEAVE_PREFETCH_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_SEQ_INTERLEAVE_PREFETCH_H_
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
class SeqInterleavePrefetch : public TFDataOptimizerBase {
public:
SeqInterleavePrefetch() = default;
~SeqInterleavePrefetch() override = default;
std::string name() const override { return "seq_interleave_prefetch"; };
bool UsesFunctionLibrary() const override { return true; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
protected:
bool autotune_ = true;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/seq_interleave_prefetch.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kInterleaveDatasetOpName[] = "InterleaveDataset";
constexpr char kParallelInterleaveDatasetV2OpName[] =
"ParallelInterleaveDatasetV2";
constexpr char kParallelInterleaveDatasetV3OpName[] =
"ParallelInterleaveDatasetV3";
constexpr char kParallelInterleaveDatasetV4OpName[] =
"ParallelInterleaveDatasetV4";
constexpr char kParallelInterleaveDatasetOpName[] = "ParallelInterleaveDataset";
constexpr char kPrefetchDatasetOpName[] = "PrefetchDataset";
constexpr char kDatasetStr[] = "Dataset";
constexpr char kConstOpName[] = "Const";
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
constexpr char kConstNodeOutputSuffix[] = ":output:0";
constexpr char kDatasetNodeOutputSuffix[] = ":handle:0";
constexpr char kDeterministicAttr[] = "deterministic";
constexpr char kFunctionAttr[] = "f";
constexpr char kDTypeAttr[] = "dtype";
constexpr char kValueAttr[] = "value";
constexpr char kTArgumentsAttr[] = "Targuments";
constexpr char kOutputTypesAttr[] = "output_types";
constexpr char kMetadataAttr[] = "metadata";
constexpr char kOutputShapesAttr[] = "output_shapes";
constexpr char kTOutputTypesAttr[] = "Toutput_types";
constexpr char kSeqInterleavePrefetchRewritePrefix[] =
"inject/seq_interleave_prefetch_rewrite_";
bool IsParallelInterleave(const std::string& op) {
return data::MatchesAnyVersion(kParallelInterleaveDatasetOpName, op);
}
int GetNumInputsForParallelInterleaveOp(const std::string& op) {
if (op == kParallelInterleaveDatasetV2OpName) {
return 4;
} else if (op == kParallelInterleaveDatasetV3OpName) {
return 4;
} else if (op == kParallelInterleaveDatasetV4OpName) {
return 6;
}
return 0;
}
bool NodeOpHasDatasetSuffix(const NodeDef& node) {
return absl::EndsWith(node.op(), kDatasetStr);
}
bool DatasetOpInFunction(const NodeDef& node, const FunctionDef* fn) {
for (const auto& node : fn->node_def()) {
if (NodeOpHasDatasetSuffix(node)) {
return true;
}
}
return false;
}
bool RewritePossibleForNode(const NodeDef& node,
const FunctionLibraryDefinition& fld) {
auto is_deterministic_parallel_interleave_node = [&]() -> bool {
if (!IsParallelInterleave(node.op())) return false;
auto determinism_value = node.attr().find(kDeterministicAttr);
return (determinism_value != node.attr().end()) &&
(determinism_value->second.s() == "true");
};
if (node.attr().count(kFunctionAttr) == 0) return false;
const FunctionDef* fn = fld.Find(node.attr().at(kFunctionAttr).func().name());
if (fn == nullptr) return false;
if (fn->signature().output_arg_size() != 1) return false;
if (is_deterministic_parallel_interleave_node()) {
return DatasetOpInFunction(node, fn);
}
return false;
}
NodeDef CreateBufferSizeNode(DataType dtype,
const std::function<void(TensorProto*)>& add_value,
MutableGraphView* graph, FunctionDef& fdef) {
NodeDef node;
node.set_op(kConstOpName);
function_utils::SetUniqueFunctionNodeName(
absl::StrCat(kSeqInterleavePrefetchRewritePrefix, "buffer_size"), &fdef,
&node);
(*node.mutable_attr())[kDTypeAttr].set_type(dtype);
auto tensor = std::make_unique<tensorflow::TensorProto>();
auto tensor_shape = std::make_unique<tensorflow::TensorShapeProto>();
tensor->set_allocated_tensor_shape(tensor_shape.release());
tensor->set_dtype(dtype);
add_value(tensor.get());
(*node.mutable_attr())[kValueAttr].set_allocated_tensor(tensor.release());
return node;
}
Status CreateAndAppendPrefetchNode(MutableGraphView* graph, FunctionDef& fdef) {
auto get_last_dataset_op_node = [&]() -> const NodeDef* {
const auto& output_arg = fdef.signature().output_arg(0).name();
const auto& ret_val = fdef.ret().at(output_arg);
auto input = function_utils::FunctionDefTensorDesc(ret_val);
const NodeDef* dataset_op_node = nullptr;
while (
function_utils::ContainsFunctionNodeWithName(input.node_name, fdef)) {
int idx = function_utils::FindFunctionNodeWithName(input.node_name, fdef);
const NodeDef& node = fdef.node_def(idx);
if (NodeOpHasDatasetSuffix(node)) {
dataset_op_node = &node;
break;
}
input = function_utils::FunctionDefTensorDesc(node.input(0));
}
return dataset_op_node;
};
const NodeDef* add_after = get_last_dataset_op_node();
if (add_after == nullptr) {
return errors::NotFound(
"Could not find any dataset node to append `Prefetch` at its output in "
"`seq_interleave_prefetch` rewrite");
}
NodeDef prefetch_node;
prefetch_node.set_op(kPrefetchDatasetOpName);
function_utils::SetUniqueFunctionNodeName(
absl::StrCat(kSeqInterleavePrefetchRewritePrefix,
fdef.signature().name()),
&fdef, &prefetch_node);
const auto input_dataset =
absl::StrCat(add_after->name(), kDatasetNodeOutputSuffix);
NodeDef buffer_size_node = CreateBufferSizeNode(
DT_INT64,
[](TensorProto* proto) { proto->add_int64_val(data::model::kAutotune); },
graph, fdef);
prefetch_node.add_input(input_dataset);
prefetch_node.add_input(
absl::StrCat(buffer_size_node.name(), kConstNodeOutputSuffix));
if (add_after->attr().count(kOutputShapes) > 0) {
graph_utils::CopyAttribute(kOutputShapes, *add_after, &prefetch_node);
} else {
tensorflow::TensorShapeProto* shape =
(*(prefetch_node.mutable_attr()))[kOutputShapes]
.mutable_list()
->add_shape();
shape->set_unknown_rank(true);
}
if (add_after->attr().count(kOutputTypes) > 0) {
graph_utils::CopyAttribute(kOutputTypes, *add_after, &prefetch_node);
} else if (add_after->attr().count(kTOutputTypesAttr) > 0) {
(*(prefetch_node.mutable_attr()))[kOutputTypes] =
add_after->attr().at(kTOutputTypesAttr);
} else {
(*(prefetch_node.mutable_attr()))[kOutputTypes].mutable_list()->add_type(
tensorflow::DataType::DT_STRING);
}
std::string old_input = input_dataset;
std::string new_input =
absl::StrCat(prefetch_node.name(), kDatasetNodeOutputSuffix);
function_utils::ReplaceReferences(old_input, new_input, &fdef);
*fdef.add_node_def() = std::move(prefetch_node);
*fdef.add_node_def() = std::move(buffer_size_node);
return absl::OkStatus();
}
Status AddInterleaveNode(MutableGraphView* graph,
const NodeDef& parallel_interleave_node,
const std::string& interleave_map_func_name,
absl::flat_hash_set<string>& nodes_to_delete) {
NodeDef interleave_node;
interleave_node.set_op(kInterleaveDatasetOpName);
graph_utils::SetUniqueGraphNodeName(
absl::StrCat(kSeqInterleavePrefetchRewritePrefix,
parallel_interleave_node.name()),
graph->graph(), &interleave_node);
int num_other_args =
parallel_interleave_node.input_size() -
GetNumInputsForParallelInterleaveOp(parallel_interleave_node.op());
  int inputs_from_parallel_interleave =
      1 /* input_dataset */ + num_other_args + 1 /* cycle_length */ +
      1 /* block_length */;

for (int i = 0; i < inputs_from_parallel_interleave; ++i) {
interleave_node.add_input(parallel_interleave_node.input(i));
}
if (parallel_interleave_node.attr().contains(kTArgumentsAttr)) {
graph_utils::CopyAttribute(kTArgumentsAttr, parallel_interleave_node,
&interleave_node);
}
if (parallel_interleave_node.attr().contains(kOutputTypesAttr)) {
graph_utils::CopyAttribute(kOutputTypesAttr, parallel_interleave_node,
&interleave_node);
}
if (parallel_interleave_node.attr().contains(kOutputShapesAttr)) {
graph_utils::CopyAttribute(kOutputShapesAttr, parallel_interleave_node,
&interleave_node);
}
if (parallel_interleave_node.attr().contains(kMetadataAttr)) {
graph_utils::CopyAttribute(kMetadataAttr, parallel_interleave_node,
&interleave_node);
}
const auto& parallel_interleave_fn_attr =
parallel_interleave_node.attr().at(kFunctionAttr);
(*interleave_node.mutable_attr())[kFunctionAttr] =
parallel_interleave_fn_attr;
(*interleave_node.mutable_attr())[kFunctionAttr].mutable_func()->set_name(
interleave_map_func_name);
graph_utils::CopyShapesAndTypesAttrs(parallel_interleave_node,
&interleave_node);
*interleave_node.mutable_experimental_type() =
parallel_interleave_node.experimental_type();
NodeDef* new_node_graph = graph->AddNode(std::move(interleave_node));
TF_RETURN_IF_ERROR(graph->UpdateFanouts(parallel_interleave_node.name(),
new_node_graph->name()));
nodes_to_delete.insert(parallel_interleave_node.name());
return absl::OkStatus();
}
}
Status SeqInterleavePrefetch::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition fld(OpRegistry::Global(), item.graph.library());
for (const NodeDef& node : item.graph.node()) {
if (!RewritePossibleForNode(node, fld)) continue;
const FunctionDef* parallel_interleave_fn =
fld.Find(node.attr().at("f").func().name());
FunctionDef interleave_fn(*parallel_interleave_fn);
interleave_fn.mutable_signature()->set_name(
absl::StrCat(kSeqInterleavePrefetchRewritePrefix,
parallel_interleave_fn->signature().name()));
TF_RETURN_IF_ERROR(AddInterleaveNode(
&graph, node, interleave_fn.signature().name(), nodes_to_delete));
TF_RETURN_IF_ERROR(CreateAndAppendPrefetchNode(&graph, interleave_fn));
TF_RETURN_IF_ERROR(fld.ReplaceFunction(
parallel_interleave_fn->signature().name(), interleave_fn));
stats->num_changes++;
}
*output->mutable_library() = fld.ToProto();
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(SeqInterleavePrefetch, "seq_interleave_prefetch");
}
} | #include "tensorflow/core/grappler/optimizers/data/seq_interleave_prefetch.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::GDef;
using test::function::NDef;
constexpr char kPrefetchDatasetOpName[] = "PrefetchDataset";
constexpr char kInterleaveDatasetOpName[] = "InterleaveDataset";
constexpr char kParallelInterleaveDatasetOpName[] =
"ParallelInterleaveDatasetV4";
constexpr char kSeqInterleavePrefetchRewritePrefix[] =
"inject/seq_interleave_prefetch_rewrite_";
constexpr char kFdefProtoStr[] =
R"pb(signature {
name: "parallel_interleave_fdef"
input_arg { name: "args_0" type: DT_STRING }
output_arg { name: "identity" type: DT_VARIANT }
is_stateful: true
control_output: "SSTableDataset"
}
node_def {
name: "key_prefix"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: ""
}
}
}
}
node_def {
name: "start_key"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: ""
}
}
}
}
node_def {
name: "stop_key"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: ""
}
}
}
}
node_def {
name: "SSTableDataset"
op: "SSTableDataset"
input: "args_0"
input: "key_prefix:output:0"
input: "start_key:output:0"
input: "stop_key:output:0"
attr {
key: "metadata"
value { s: "" }
}
attr {
key: "split_size"
value { i: 0 }
}
experimental_type {
type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_TENSOR
args { type_id: TFT_STRING }
}
}
}
}
node_def {
name: "Identity"
op: "Identity"
input: "SSTableDataset:handle:0"
input: "^NoOp"
attr {
key: "T"
value { type: DT_VARIANT }
}
}
node_def { name: "NoOp" op: "NoOp" input: "^SSTableDataset" }
ret { key: "identity" value: "Identity:output:0" }
attr {
key: "_construction_context"
value { s: "kEagerRuntime" }
}
attr {
key: "_tf_data_function"
value { b: true }
}
control_ret { key: "SSTableDataset" value: "SSTableDataset" }
arg_attr {
key: 0
value {
attr {
key: "_output_shapes"
value { list { shape {} } }
}
attr {
key: "_user_specified_name"
value { s: "args_0" }
}
}
})pb";
GraphDef ParallelInterleaveCase(bool deterministic) {
FunctionDef fdef;
protobuf::TextFormat::ParseFromString(kFdefProtoStr, &fdef);
return GDef(
{NDef("stop", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"stop"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV4Node(
"parallel_interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", "parallel_interleave_fdef",
deterministic ? "true" : "false")},
{
fdef,
});
}
GraphDef MultipleParallelInterleaveCase(bool deterministic) {
FunctionDef fdef_1, fdef_2, fdef_3;
protobuf::TextFormat::ParseFromString(kFdefProtoStr, &fdef_1);
fdef_1.mutable_signature()->set_name("parallel_interleave_fdef_1");
protobuf::TextFormat::ParseFromString(kFdefProtoStr, &fdef_2);
fdef_2.mutable_signature()->set_name("parallel_interleave_fdef_2");
protobuf::TextFormat::ParseFromString(kFdefProtoStr, &fdef_3);
fdef_3.mutable_signature()->set_name("parallel_interleave_fdef_3");
auto make_parallel_interleave_node =
[&deterministic](const int node_num, const FunctionDef &fdef) {
return graph_tests_utils::MakeParallelInterleaveV4Node(
absl::StrCat("parallel_interleave_", node_num), "range",
"cycle_length", "block_length", "num_parallel_calls",
fdef.signature().name(), deterministic ? "true" : "false");
};
return GDef(
{NDef("stop", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"stop"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
make_parallel_interleave_node(1, fdef_1),
make_parallel_interleave_node(2, fdef_2),
make_parallel_interleave_node(3, fdef_3)},
{
fdef_1,
fdef_2,
fdef_3,
});
}
GraphDef InterleaveCase(bool deterministic) {
FunctionDef fdef;
protobuf::TextFormat::ParseFromString(kFdefProtoStr, &fdef);
return GDef(
{NDef("stop", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"stop"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeInterleaveNode(
"sequential_interleave", "range", "cycle_length", "block_length",
"parallel_interleave_fdef", deterministic ? "true" : "false")},
{
fdef,
});
}
bool PrefetchInFunction(const NodeDef &node,
const FunctionLibraryDefinition &flib) {
auto f_attr_it = node.attr().find("f");
if (f_attr_it == node.attr().end()) return false;
const FunctionDef *func = flib.Find(f_attr_it->second.func().name());
if (func == nullptr) {
return false;
}
for (int i = 0; i < func->node_def_size(); i++) {
NodeDef node_in_func = func->node_def(i);
    if (tensorflow::data::MatchesAnyVersion(
            /*op_prefix=*/kPrefetchDatasetOpName,
            /*op_to_match=*/node_in_func.op())) {
return true;
}
}
return false;
}
bool IsInterleaveNode(const NodeDef &node) {
return (node.op() == kInterleaveDatasetOpName);
}
}
Status OptimizeWithInjectInterleavePrefetch(const GrapplerItem &item,
GraphDef *output) {
SeqInterleavePrefetch optimizer;
return optimizer.Optimize(nullptr, item, output);
}
class SeqInterleavePrefetchParameterizedTest
: public ::testing::TestWithParam<bool> {};
TEST_P(SeqInterleavePrefetchParameterizedTest,
ParallelInterleaveHasConditionalInjection) {
GrapplerItem item;
bool deterministic = GetParam();
item.graph = ParallelInterleaveCase(deterministic);
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectInterleavePrefetch(item, &output));
FunctionLibraryDefinition lib_def(OpRegistry::Global(), output.library());
const std::string ¶llel_interleave_fdef_name = "parallel_interleave_fdef";
const std::string &interleave_fdef_name = absl::StrCat(
kSeqInterleavePrefetchRewritePrefix, parallel_interleave_fdef_name);
if (deterministic) {
EXPECT_TRUE(
!graph_utils::ContainsGraphNodeWithName("parallel_interleave", output));
EXPECT_TRUE(!graph_utils::ContainsNodeWithOp(
kParallelInterleaveDatasetOpName, output));
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp(kInterleaveDatasetOpName, output));
for (auto node : output.node()) {
if (!IsInterleaveNode(node)) continue;
EXPECT_TRUE(PrefetchInFunction(node, lib_def));
}
const FunctionDef *parallel_interleave_fdef =
lib_def.Find(parallel_interleave_fdef_name);
const FunctionDef *interleave_fdef = lib_def.Find(interleave_fdef_name);
EXPECT_EQ(parallel_interleave_fdef, nullptr);
EXPECT_NE(interleave_fdef, nullptr);
EXPECT_EQ(lib_def.ListFunctionNames().at(0), interleave_fdef_name);
EXPECT_TRUE(function_utils::FindFunctionNodeWithOp(kPrefetchDatasetOpName,
*interleave_fdef));
} else {
EXPECT_TRUE(graph_utils::ContainsNodeWithOp(
kParallelInterleaveDatasetOpName, output));
EXPECT_TRUE(
!graph_utils::ContainsNodeWithOp(kInterleaveDatasetOpName, output));
EXPECT_TRUE(
graph_utils::ContainsGraphNodeWithName("parallel_interleave", output));
EXPECT_NE(lib_def.Find(parallel_interleave_fdef_name), nullptr);
}
EXPECT_EQ(lib_def.num_functions(), 1);
}
TEST_P(SeqInterleavePrefetchParameterizedTest,
MultipleParallelInterleavesHaveConditionalInjection) {
GrapplerItem item;
bool deterministic = GetParam();
item.graph = MultipleParallelInterleaveCase(deterministic);
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectInterleavePrefetch(item, &output));
FunctionLibraryDefinition lib_def(OpRegistry::Global(), output.library());
if (deterministic) {
EXPECT_TRUE(!graph_utils::ContainsNodeWithOp(
kParallelInterleaveDatasetOpName, output));
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp(kInterleaveDatasetOpName, output));
for (int i = 1; i <= 3; ++i) {
EXPECT_TRUE(!graph_utils::ContainsGraphNodeWithName(
absl::StrCat("parallel_interleave_", std::to_string(i)), output));
}
for (auto node : output.node()) {
if (!IsInterleaveNode(node)) continue;
EXPECT_TRUE(PrefetchInFunction(node, lib_def));
}
} else {
EXPECT_TRUE(graph_utils::ContainsNodeWithOp(
kParallelInterleaveDatasetOpName, output));
EXPECT_TRUE(
!graph_utils::ContainsNodeWithOp(kInterleaveDatasetOpName, output));
for (int i = 1; i <= 3; ++i) {
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName(
absl::StrCat("parallel_interleave_", std::to_string(i)), output));
}
}
}
TEST_P(SeqInterleavePrefetchParameterizedTest,
SequentialInterleaveHasNoInjection) {
GrapplerItem item;
item.graph = InterleaveCase(GetParam());
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectInterleavePrefetch(item, &output));
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp(kInterleaveDatasetOpName, output));
EXPECT_TRUE(
graph_utils::ContainsGraphNodeWithName("sequential_interleave", output));
FunctionLibraryDefinition lib_def(OpRegistry::Global(), output.library());
for (auto node : output.node()) {
if (!IsInterleaveNode(node)) continue;
EXPECT_FALSE(PrefetchInFunction(node, lib_def));
}
}
INSTANTIATE_TEST_SUITE_P(Determinism, SeqInterleavePrefetchParameterizedTest,
::testing::Values(false, true));
}
} |
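A short worked sketch of the input arithmetic behind AddInterleaveNode above; the total input count of 8 used here is an assumed example for a node with two captured arguments, not a value from the repository:
// ParallelInterleaveDatasetV4 has 6 non-captured inputs (input_dataset,
// cycle_length, block_length, buffer_output_elements, prefetch_input_elements,
// num_parallel_calls), so GetNumInputsForParallelInterleaveOp returns 6.
// For a node with 8 inputs in total, 2 of them are captured "other_arguments".
int num_other_args =
    8 - GetNumInputsForParallelInterleaveOp("ParallelInterleaveDatasetV4");  // == 2
// The sequential InterleaveDataset keeps only input_dataset, the captured
// arguments, cycle_length and block_length -- the first 5 inputs here.
int inputs_from_parallel_interleave = 1 + num_other_args + 1 + 1;  // == 5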
1,421 | cpp | tensorflow/tensorflow | autotune_buffer_sizes | tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes.cc | tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_AUTOTUNE_BUFFER_SIZES_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_AUTOTUNE_BUFFER_SIZES_H_
#include "absl/status/status.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
constexpr char kAutotune[] = "autotune";
class AutotuneBufferSizes : public TFDataOptimizerBase {
public:
AutotuneBufferSizes() = default;
~AutotuneBufferSizes() override = default;
string name() const override { return "autotune_buffer_sizes"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
if (!config) return absl::OkStatus();
const string& autotune = config->parameter_map().at(kAutotune).s();
if (autotune == "true") {
autotune_ = true;
} else if (autotune == "false") {
autotune_ = false;
} else {
return absl::InvalidArgumentError(
absl::StrCat("Received an invalid value for parameter ", kAutotune,
": ", autotune));
}
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
private:
bool autotune_ = true;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kBufferSizeMin[] = "buffer_size_min";
constexpr char kPrefetchDataset[] = "PrefetchDataset";
constexpr std::array<const char*, 8> kAsyncDatasetOps = {
"ExperimentalMapAndBatchDataset",
"MapAndBatchDataset",
"ParallelBatchDataset",
"ParallelInterleaveDatasetV2",
"ParallelInterleaveDatasetV3",
"ParallelInterleaveDatasetV4",
"ParallelMapDataset",
"ParallelMapDatasetV2",
};
}
Status AutotuneBufferSizes::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization autotune_buffer_sizes is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
NodeDef* autotune_value =
graph_utils::AddScalarConstNode(data::model::kAutotune, &graph);
absl::flat_hash_set<string> already_prefetched;
for (NodeDef& node : *output->mutable_node()) {
if (node.op() == kPrefetchDataset) {
NodeDef* buffer_size_node = graph.GetNode(node.input(1));
if (buffer_size_node->op() == "Const") {
int64_t initial_buffer_size =
buffer_size_node->attr().at("value").tensor().int64_val(0);
if (initial_buffer_size != data::model::kAutotune) {
TF_RETURN_IF_ERROR(graph.UpdateFanin(node.name(),
{buffer_size_node->name(), 0},
{autotune_value->name(), 0}));
node.mutable_attr()->at(kBufferSizeMin).set_i(initial_buffer_size);
stats->num_changes++;
}
} else {
return absl::FailedPreconditionError(
"The autotune_buffer_sizes rewrite does not currently support "
"non-constant buffer_size input.");
}
NodeDef* prefetched_node = graph_utils::GetInputNode(node, graph);
if (prefetched_node) {
already_prefetched.insert(prefetched_node->name());
}
}
}
std::vector<const NodeDef*> async_datasets;
for (const NodeDef& node : item.graph.node()) {
if (already_prefetched.find(node.name()) != already_prefetched.end()) {
continue;
}
for (const auto& async_dataset_op : kAsyncDatasetOps) {
if (node.op() == async_dataset_op) {
async_datasets.push_back(&node);
stats->num_changes++;
break;
}
}
}
if (async_datasets.empty()) return absl::OkStatus();
for (const NodeDef* async_dataset_node : async_datasets) {
NodeDef prefetch_node;
graph_utils::SetUniqueGraphNodeName(
strings::StrCat("inject/prefetch_", async_dataset_node->name()),
graph.graph(), &prefetch_node);
prefetch_node.set_op(kPrefetchDataset);
*prefetch_node.mutable_input()->Add() = async_dataset_node->name();
*prefetch_node.mutable_input()->Add() = autotune_value->name();
graph_utils::CopyShapesAndTypesAttrs(*async_dataset_node, &prefetch_node);
auto* added_node = graph.AddNode(std::move(prefetch_node));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(async_dataset_node->name(), added_node->name()));
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(AutotuneBufferSizes, "autotune_buffer_sizes");
}
} | #include "tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithAutotuneBufferSizes(const GrapplerItem &item,
GraphDef *output, bool autotune) {
AutotuneBufferSizes optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
class SimpleInject : public ::testing::TestWithParam<string> {};
TEST_P(SimpleInject, AutotuneBufferSizesTest) {
const string async_dataset = GetParam();
using test::function::NDef;
GrapplerItem item;
if (async_dataset == "map") {
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
         graph_tests_utils::MakeParallelMapNode(
             "map", "range", "num_parallel_calls", "XTimesTwo",
             /*sloppy=*/false)},
{
test::function::XTimesTwo(),
});
} else if (async_dataset == "interleave") {
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
         graph_tests_utils::MakeParallelInterleaveV2Node(
             "interleave", "range", "cycle_length", "block_length",
             "num_parallel_calls", "XTimesTwo", /*sloppy=*/false)},
{
test::function::XTimesTwo(),
});
} else if (async_dataset == "map_and_batch") {
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 32}, {"dtype", DT_INT64}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT64}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeMapAndBatchNode(
"map_and_batch", "range", "batch_size", "num_parallel_calls",
"drop_remainder", "XTimesTwo")},
{
test::function::XTimesTwo(),
});
}
GraphDef output;
TF_ASSERT_OK(OptimizeWithAutotuneBufferSizes(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("PrefetchDataset", output));
int index = graph_utils::FindGraphNodeWithOp("PrefetchDataset", output);
const NodeDef prefetch_node = output.node(index);
EXPECT_TRUE(prefetch_node.attr().find("legacy_autotune") ==
prefetch_node.attr().end());
EXPECT_EQ(prefetch_node.input_size(), 2);
NodeDef async_node = output.node(
graph_utils::FindGraphNodeWithName(prefetch_node.input(0), output));
EXPECT_EQ(async_node.name(), async_dataset);
NodeDef buffer_size_val = output.node(
graph_utils::FindGraphNodeWithName(prefetch_node.input(1), output));
EXPECT_EQ(buffer_size_val.attr().at("value").tensor().int64_val(0), -1);
}
INSTANTIATE_TEST_SUITE_P(Test, SimpleInject,
::testing::Values("map", "interleave",
"map_and_batch"));
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, AutotuneBufferSizesTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
       graph_tests_utils::MakeParallelMapNode("map", "range",
                                              "num_parallel_calls", "XTimesTwo",
                                              /*sloppy=*/false)},
{
test::function::XTimesTwo(),
});
GraphDef output;
TF_ASSERT_OK(OptimizeWithAutotuneBufferSizes(item, &output, autotune));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("PrefetchDataset", output),
autotune);
}
class MultipleNodes
: public ::testing::TestWithParam<std::tuple<bool, int64_t>> {};
TEST_P(MultipleNodes, AutotuneBufferSizesTest) {
const bool legacy_autotune = std::get<0>(GetParam());
const int64_t initial_buffer_size = std::get<1>(GetParam());
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_val = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_val = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_val = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_val->name();
range_inputs[1] = stop_val->name();
range_inputs[2] = step_val->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("range", "RangeDataset",
range_inputs, range_attrs, &graph);
NodeDef *parallelism_val =
graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> map_inputs1(2);
map_inputs1[0] = range_node->name();
map_inputs1[1] = parallelism_val->name();
std::vector<std::pair<string, AttrValue>> map_attrs(4);
AttrValue attr_val;
SetAttrValue("value", &attr_val);
map_attrs[0] = std::make_pair("f", attr_val);
map_attrs[1] = std::make_pair("Targuments", attr_val);
map_attrs[2] = std::make_pair("output_types", attr_val);
map_attrs[3] = std::make_pair("output_shapes", attr_val);
NodeDef *map_node1 = graph_utils::AddNode("map1", "ParallelMapDatasetV2",
map_inputs1, map_attrs, &graph);
NodeDef *buffer_size_val =
graph_utils::AddScalarConstNode<int64_t>(initial_buffer_size, &graph);
std::vector<string> prefetch_inputs(2);
prefetch_inputs[0] = map_node1->name();
prefetch_inputs[1] = buffer_size_val->name();
std::vector<std::pair<string, AttrValue>> prefetch_attrs(4);
AttrValue legacy_autotune_attr;
SetAttrValue(legacy_autotune, &legacy_autotune_attr);
AttrValue buffer_size_min_attr;
SetAttrValue(0, &buffer_size_min_attr);
prefetch_attrs[0] = std::make_pair("legacy_autotune", legacy_autotune_attr);
prefetch_attrs[1] = std::make_pair("buffer_size_min", buffer_size_min_attr);
prefetch_attrs[2] = std::make_pair("output_types", attr_val);
prefetch_attrs[3] = std::make_pair("output_shapes", attr_val);
NodeDef *prefetch_node = graph_utils::AddNode(
"prefetch", "PrefetchDataset", prefetch_inputs, prefetch_attrs, &graph);
std::vector<string> map_inputs2(2);
map_inputs2[0] = prefetch_node->name();
map_inputs2[1] = parallelism_val->name();
NodeDef *map_node2 = graph_utils::AddNode("map2", "ParallelMapDatasetV2",
map_inputs2, map_attrs, &graph);
std::vector<string> map_inputs3(1);
map_inputs3[0] = map_node2->name();
graph_utils::AddNode("map3", "MapDataset", map_inputs3, map_attrs, &graph);
GraphDef output;
TF_ASSERT_OK(OptimizeWithAutotuneBufferSizes(item, &output, true));
std::vector<int> prefetch_indices =
graph_utils::FindAllGraphNodesWithOp("PrefetchDataset", output);
EXPECT_EQ(prefetch_indices.size(), 2);
NodeDef new_map_node3 =
output.node(graph_utils::FindGraphNodeWithName("map3", output));
NodeDef new_prefetch_node2 = output.node(
graph_utils::FindGraphNodeWithName(new_map_node3.input(0), output));
EXPECT_EQ(new_prefetch_node2.op(), "PrefetchDataset");
EXPECT_EQ(new_prefetch_node2.input_size(), 2);
EXPECT_TRUE(new_prefetch_node2.attr().find("legacy_autotune") ==
new_prefetch_node2.attr().end());
EXPECT_TRUE(new_prefetch_node2.attr().find("buffer_size_min") ==
new_prefetch_node2.attr().end());
NodeDef new_buffer_size_val2 = output.node(
graph_utils::FindGraphNodeWithName(new_prefetch_node2.input(1), output));
EXPECT_EQ(new_buffer_size_val2.attr().at("value").tensor().int64_val(0), -1);
NodeDef new_map_node2 = output.node(
graph_utils::FindGraphNodeWithName(new_prefetch_node2.input(0), output));
EXPECT_EQ(new_map_node2.name(), "map2");
NodeDef new_prefetch_node1 = output.node(
graph_utils::FindGraphNodeWithName(new_map_node2.input(0), output));
EXPECT_EQ(new_prefetch_node1.op(), "PrefetchDataset");
EXPECT_EQ(new_prefetch_node1.input_size(), 2);
EXPECT_EQ(new_prefetch_node1.attr().at("legacy_autotune").b(),
legacy_autotune);
EXPECT_EQ(new_prefetch_node1.attr().at("buffer_size_min").i(),
(initial_buffer_size == -1 ? 0 : initial_buffer_size));
NodeDef new_buffer_size_val1 = output.node(
graph_utils::FindGraphNodeWithName(new_prefetch_node1.input(1), output));
EXPECT_EQ(new_buffer_size_val1.attr().at("value").tensor().int64_val(0), -1);
NodeDef new_map_node1 = output.node(
graph_utils::FindGraphNodeWithName(new_prefetch_node1.input(0), output));
EXPECT_EQ(new_map_node1.name(), "map1");
}
INSTANTIATE_TEST_SUITE_P(Test, MultipleNodes,
::testing::Combine(::testing::Values(true, false),
::testing::Values(-1, 3)));
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
}
}
} |
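A brief illustrative note on the buffer-size rewrite above; the only assumption is that data::model::kAutotune is the -1 sentinel, which the tests above also check via int64_val(0) == -1:
// Before: PrefetchDataset(map1, Const(3))  with attr buffer_size_min = 0
// After:  PrefetchDataset(map1, Const(-1)) with attr buffer_size_min = 3
// Async datasets not already followed by a prefetch gain a new
// "inject/prefetch_<name>" node whose buffer size is the same -1 sentinel.
static_assert(tensorflow::data::model::kAutotune == -1,
              "sketch above assumes the tf.data autotune sentinel is -1");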
1,422 | cpp | tensorflow/tensorflow | filter_parallelization | tensorflow/core/grappler/optimizers/data/filter_parallelization.cc | tensorflow/core/grappler/optimizers/data/filter_parallelization_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FILTER_PARALLELIZATION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FILTER_PARALLELIZATION_H_
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
constexpr char kAutotune[] = "autotune";
class FilterParallelization : public TFDataOptimizerBase {
public:
FilterParallelization() = default;
~FilterParallelization() override = default;
string name() const override { return "filter_parallelization"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
if (!config) return absl::OkStatus();
const string& autotune = config->parameter_map().at(kAutotune).s();
if (autotune == "true") {
autotune_ = true;
} else if (autotune == "false") {
autotune_ = false;
} else {
return errors::InvalidArgument("Received an invalid value for parameter ",
kAutotune, ": ", autotune);
}
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
private:
bool autotune_ = true;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/filter_parallelization.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kFilterDataset[] = "FilterDataset";
constexpr char kParallelFilterDataset[] = "ParallelFilterDataset";
NodeDef MakeParallelFilter(const string& name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << name
<< " in the optimized graph.";
NodeDef parallel_filter = graph->graph()->node(index);
graph_utils::SetUniqueGraphNodeName(kParallelFilterDataset, graph->graph(),
¶llel_filter);
parallel_filter.set_op(kParallelFilterDataset);
auto* num_parallel_calls = graph_utils::AddScalarConstNode(
static_cast<int64_t>(data::model::kAutotune), graph);
parallel_filter.add_input(num_parallel_calls->name());
AddNodeAttr("deterministic", "true", ¶llel_filter);
return parallel_filter;
}
}
Status FilterParallelization::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization filter_parallelization is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
auto get_filter_node = [](const NodeDef& node) -> const NodeDef* {
if (node.op() == kFilterDataset) return &node;
return nullptr;
};
for (const NodeDef& node : item.graph.node()) {
const NodeDef* filter_node = get_filter_node(node);
if (!filter_node) continue;
auto* function = function_library.Find(
filter_node->attr().at("predicate").func().name());
if (function_utils::IsFunctionStateful(function_library, *function, true)) {
continue;
}
auto* parallel_filter =
graph.AddNode(MakeParallelFilter(filter_node->name(), &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(filter_node->name(), parallel_filter->name()));
nodes_to_delete.insert(filter_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(FilterParallelization, "filter_parallelization");
}
} | #include "tensorflow/core/grappler/optimizers/data/filter_parallelization.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithFilterParallelization(const GrapplerItem& item,
GraphDef* output, bool autotune) {
FilterParallelization optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
using graph_tests_utils::MakeFilterNode;
const char stateless_fun_name[] = "NonZero";
const char stateful_fun_name[] = "RandomUniformLess";
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, FilterParallelizationTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter", "range", stateless_fun_name),
NDef("Sink", "Identity", {"filter"}, {})},
{
test::function::NonZero(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithFilterParallelization(item, &output, autotune));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelFilterDataset", output),
autotune);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("filter", output),
!autotune);
}
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
class FromFunctionDef : public ::testing::TestWithParam<string> {};
TEST_P(FromFunctionDef, FilterParallelizationTest) {
const string op = GetParam();
bool from_function_def = (op == "_Retval");
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter", "range", stateless_fun_name),
NDef("Sink", op, {"filter"}, {})},
{
test::function::NonZero(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithFilterParallelization(item, &output, true));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelFilterDataset", output),
!from_function_def);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("filter", output),
from_function_def);
}
INSTANTIATE_TEST_SUITE_P(Test, FromFunctionDef,
::testing::Values("Identity", "_Retval"));
TEST(ParallelizeAssert, FilterParallelizationTest) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter1", "range", stateful_fun_name),
MakeFilterNode("filter2", "filter1", stateless_fun_name),
NDef("cache", "CacheDataset", {"filter2", "filename"}, {}),
NDef("Sink", "Identity", {"cache"}, {})},
{
test::function::NonZero(),
test::function::RandomUniformLess(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithFilterParallelization(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("ParallelFilterDataset", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("filter1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter2", output));
}
}
}
} |
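A minimal driver sketch, assuming a GrapplerItem `item` built the same way as in the tests above; with no config the optimizer's autotune_ default of true applies, so a stateless FilterDataset is rewritten:
FilterParallelization optimizer;
TF_ASSERT_OK(optimizer.Init(nullptr));  // no config: autotune stays enabled
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
// The stateless filter becomes a ParallelFilterDataset with
// num_parallel_calls = kAutotune and deterministic = "true".
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("ParallelFilterDataset", output));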
1,423 | cpp | tensorflow/tensorflow | split_utils | tensorflow/core/data/split_utils.cc | tensorflow/core/data/split_utils_test.cc | #ifndef TENSORFLOW_CORE_DATA_SPLIT_UTILS_H_
#define TENSORFLOW_CORE_DATA_SPLIT_UTILS_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
class IndexSplitProvider : public SplitProvider {
public:
explicit IndexSplitProvider(int64_t n);
absl::Status GetNext(Tensor* split, bool* end_of_splits) override;
absl::Status Reset() override;
absl::Status Save(std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) override;
absl::Status Restore(std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) override;
int64_t Cardinality() const override;
private:
tsl::mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
const int64_t n_;
};
class ShardingSplitProvider : public SplitProvider {
public:
ShardingSplitProvider(int64_t num_shards, int64_t shard_index,
std::shared_ptr<SplitProvider> split_provider);
absl::Status GetNext(Tensor* split, bool* end_of_splits) override;
absl::Status Reset() override;
absl::Status Save(std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) override;
absl::Status Restore(std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) override;
private:
const int64_t num_shards_;
const int64_t shard_index_;
tsl::mutex mu_;
std::shared_ptr<SplitProvider> split_provider_ TF_GUARDED_BY(mu_);
int64_t num_to_skip_ TF_GUARDED_BY(mu_);
};
absl::StatusOr<std::vector<std::unique_ptr<SplitProvider>>> GetSplitProviders(
const DatasetBase* dataset);
absl::StatusOr<std::shared_ptr<SplitProvider>> GetSingleSplitProvider(
IteratorContext* ctx, const DatasetBase* dataset);
absl::StatusOr<std::vector<IteratorContext>> CreateInputIteratorContexts(
IteratorContext* ctx, const DatasetBase* dataset);
}
}
#endif
#include "tensorflow/core/data/split_utils.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/types.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNumToSkip[] = "num_to_skip";
constexpr char kSplitProvider[] = "split_provider";
constexpr char kSlash[] = "/";
constexpr char kIndex[] = "index";
}
IndexSplitProvider::IndexSplitProvider(int64_t n) : i_(0), n_(n) {
VLOG(3) << "Created index split provider with " << n << " splits.";
}
absl::Status IndexSplitProvider::GetNext(Tensor* split, bool* end_of_splits) {
tsl::mutex_lock l(mu_);
if (i_ >= n_) {
*end_of_splits = true;
return absl::OkStatus();
}
*end_of_splits = false;
*split = Tensor(DT_INT64, TensorShape{});
split->scalar<int64_t>()() = i_++;
return absl::OkStatus();
}
absl::Status IndexSplitProvider::Reset() {
tsl::mutex_lock l(mu_);
i_ = 0;
return absl::OkStatus();
}
absl::Status IndexSplitProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) {
tsl::mutex_lock l(mu_);
return writer->WriteScalar(full_name(kIndex), i_);
}
absl::Status IndexSplitProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) {
tsl::mutex_lock l(mu_);
return reader->ReadScalar(full_name(kIndex), &i_);
}
int64_t IndexSplitProvider::Cardinality() const {
if (n_ == tsl::kint64max) {
return kInfiniteCardinality;
}
return n_;
}
ShardingSplitProvider::ShardingSplitProvider(
int64_t num_shards, int64_t shard_index,
std::shared_ptr<SplitProvider> split_provider)
: num_shards_(num_shards),
shard_index_(shard_index),
split_provider_(split_provider),
num_to_skip_(shard_index_) {}
absl::Status ShardingSplitProvider::GetNext(Tensor* split,
bool* end_of_splits) {
tsl::mutex_lock l(mu_);
while (num_to_skip_ > 0) {
TF_RETURN_IF_ERROR(split_provider_->GetNext(split, end_of_splits));
if (*end_of_splits) {
return absl::OkStatus();
}
num_to_skip_--;
}
num_to_skip_ = num_shards_ - 1;
TF_RETURN_IF_ERROR(split_provider_->GetNext(split, end_of_splits));
return absl::OkStatus();
}
absl::Status ShardingSplitProvider::Reset() {
tsl::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(split_provider_->Reset());
num_to_skip_ = shard_index_;
return absl::OkStatus();
}
absl::Status ShardingSplitProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) {
tsl::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(split_provider_->Save(
[&](const std::string& key) {
return full_name(absl::StrCat(kSplitProvider, kSlash, key));
},
writer));
return writer->WriteScalar(full_name(kNumToSkip), num_to_skip_);
}
absl::Status ShardingSplitProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) {
tsl::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(split_provider_->Restore(
[&](const std::string& key) {
return full_name(absl::StrCat(kSplitProvider, kSlash, key));
},
reader));
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kNumToSkip), &num_to_skip_));
return absl::OkStatus();
}
absl::StatusOr<std::shared_ptr<SplitProvider>> GetSingleSplitProvider(
IteratorContext* ctx, const DatasetBase* dataset) {
if (ctx->split_providers().size() != 1) {
return absl::FailedPreconditionError(
absl::StrCat("Failed to get single split provider for dataset ",
dataset->DebugString(), ". Found ",
ctx->split_providers().size(), " split providers"));
}
return ctx->split_providers()[0];
}
absl::StatusOr<std::vector<std::unique_ptr<SplitProvider>>> GetSplitProviders(
const DatasetBase* dataset) {
std::vector<std::unique_ptr<SplitProvider>> result;
std::vector<const DatasetBase*> inputs;
TF_RETURN_IF_ERROR(dataset->InputDatasets(&inputs));
for (const auto& input : inputs) {
std::vector<std::unique_ptr<SplitProvider>> providers;
TF_RETURN_IF_ERROR(input->MakeSplitProviders(&providers));
for (auto& provider : providers) {
result.push_back(std::move(provider));
}
}
return result;
}
absl::StatusOr<std::vector<IteratorContext>> CreateInputIteratorContexts(
IteratorContext* ctx, const DatasetBase* dataset) {
std::vector<const DatasetBase*> inputs;
TF_RETURN_IF_ERROR(dataset->InputDatasets(&inputs));
std::vector<IteratorContext> result;
if (ctx->split_providers().empty()) {
for (int i = 0; i < inputs.size(); ++i) {
result.emplace_back(ctx);
}
return result;
}
int64_t num_sources = 0;
for (size_t i = 0; i < inputs.size(); ++i) {
if (inputs[i]->num_sources() < 0) {
return absl::FailedPreconditionError(absl::StrCat(
"Failed to determine the number of sources for dataset of type ",
inputs[i]->type_string()));
}
num_sources += inputs[i]->num_sources();
}
if (num_sources != ctx->split_providers().size()) {
return absl::FailedPreconditionError(absl::StrCat(
"Attempted to feed ", ctx->split_providers().size(),
" split providers into a dataset with ", num_sources, " sources"));
}
int64_t split_provider_index = 0;
for (size_t i = 0; i < inputs.size(); ++i) {
IteratorContext::Params params(ctx);
params.split_providers.clear();
for (int j = 0; j < inputs[i]->num_sources(); ++j) {
params.split_providers.push_back(
ctx->split_providers()[split_provider_index + j]);
}
split_provider_index += inputs[i]->num_sources();
result.emplace_back(std::move(params));
}
return result;
}
}
} | #include "tensorflow/core/data/split_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
std::string full_name(const std::string& name) {
return FullName("test", name);
}
Status SaveAndRestore(SplitProvider* split_provider) {
VariantTensorDataWriter writer;
TF_RETURN_IF_ERROR(split_provider->Save(full_name, &writer));
std::vector<const VariantTensorData*> variants;
writer.GetData(&variants);
VariantTensorDataReader reader(variants);
TF_RETURN_IF_ERROR(split_provider->Restore(full_name, &reader));
return absl::OkStatus();
}
Status CheckOutput(SplitProvider* split_provider,
std::vector<Tensor> expected) {
int64_t next = 0;
bool end_of_splits = false;
while (!end_of_splits) {
Tensor split;
TF_RETURN_IF_ERROR(split_provider->GetNext(&split, &end_of_splits));
if (!end_of_splits) {
test::ExpectEqual(split, expected[next++]);
}
}
EXPECT_EQ(next, expected.size());
return absl::OkStatus();
}
TEST(IndexSplitProviderTest, Empty) {
IndexSplitProvider split_provider(0);
TF_EXPECT_OK(CheckOutput(&split_provider,
CreateTensors<int64_t>(TensorShape({}), {})));
}
TEST(IndexSplitProviderTest, One) {
IndexSplitProvider split_provider(1);
TF_EXPECT_OK(CheckOutput(&split_provider,
CreateTensors<int64_t>(TensorShape({}), {{0}})));
}
TEST(IndexSplitProviderTest, Three) {
IndexSplitProvider split_provider(3);
TF_EXPECT_OK(
CheckOutput(&split_provider,
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})));
}
TEST(IndexSplitProviderTest, SaveAndRestore) {
IndexSplitProvider split_provider(4);
std::vector<Tensor> expected =
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}});
for (int i = 0; i < expected.size(); ++i) {
TF_ASSERT_OK(SaveAndRestore(&split_provider));
Tensor split;
bool end_of_splits = true;
TF_ASSERT_OK(split_provider.GetNext(&split, &end_of_splits));
EXPECT_FALSE(end_of_splits);
test::ExpectEqual(split, expected[i]);
}
TF_ASSERT_OK(SaveAndRestore(&split_provider));
Tensor split;
bool end_of_splits = false;
TF_ASSERT_OK(split_provider.GetNext(&split, &end_of_splits));
EXPECT_TRUE(end_of_splits);
}
TEST(ShardingSplitProviderTest, TwoWayShardZero) {
auto base = std::make_shared<IndexSplitProvider>(4);
ShardingSplitProvider split_provider(2, 0, base);
TF_EXPECT_OK(CheckOutput(
&split_provider, CreateTensors<int64_t>(TensorShape({}), {{0}, {2}})));
}
TEST(ShardingSplitProviderTest, TwoWayShardOne) {
auto base = std::make_shared<IndexSplitProvider>(4);
ShardingSplitProvider split_provider(2, 1, base);
TF_EXPECT_OK(CheckOutput(
&split_provider, CreateTensors<int64_t>(TensorShape({}), {{1}, {3}})));
}
TEST(ShardingSplitProviderTest, ThreeWayShardOne) {
auto base = std::make_shared<IndexSplitProvider>(6);
ShardingSplitProvider split_provider(3, 1, base);
TF_EXPECT_OK(CheckOutput(
&split_provider, CreateTensors<int64_t>(TensorShape({}), {{1}, {4}})));
}
TEST(ShardingSplitProviderTest, Empty) {
auto base = std::make_shared<IndexSplitProvider>(1);
ShardingSplitProvider split_provider(2, 1, base);
TF_EXPECT_OK(CheckOutput(&split_provider,
CreateTensors<int64_t>(TensorShape({}), {})));
}
TEST(ShardingSplitProviderTest, SaveAndRestore) {
auto base = std::make_shared<IndexSplitProvider>(6);
std::vector<Tensor> expected =
CreateTensors<int64_t>(TensorShape({}), {{1}, {4}});
ShardingSplitProvider split_provider(3, 1, base);
for (int i = 0; i < expected.size(); ++i) {
TF_ASSERT_OK(SaveAndRestore(&split_provider));
Tensor split;
bool end_of_splits = true;
TF_ASSERT_OK(split_provider.GetNext(&split, &end_of_splits));
EXPECT_FALSE(end_of_splits);
test::ExpectEqual(split, expected[i]);
}
TF_ASSERT_OK(SaveAndRestore(&split_provider));
Tensor split;
bool end_of_splits = false;
TF_ASSERT_OK(split_provider.GetNext(&split, &end_of_splits));
EXPECT_TRUE(end_of_splits);
}
}
}
} |
1,424 | cpp | tensorflow/tensorflow | disable_intra_op_parallelism | tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.cc | tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_DISABLE_INTRA_OP_PARALLELISM_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_DISABLE_INTRA_OP_PARALLELISM_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
class DisableIntraOpParallelism : public TFDataOptimizerBase {
public:
DisableIntraOpParallelism() = default;
~DisableIntraOpParallelism() override = default;
string name() const override { return "disable_intra_op_parallelism"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kMaxIntraOpParallelismDataset[] = "MaxIntraOpParallelismDataset";
constexpr char kModelDataset[] = "ModelDataset";
constexpr std::array<const char*, 2> kMaxIntraOpParallelismDatasetOps = {
"MaxIntraOpParallelismDataset",
"ExperimentalMaxIntraOpParallelismDataset",
};
}
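// Caps intra-op parallelism of the pipeline at 1 by inserting a
// MaxIntraOpParallelismDataset just before the fetch node (or before a
// trailing ModelDataset), unless the user has already set a limit.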
Status DisableIntraOpParallelism::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
if (item.fetch.size() != 1) {
return errors::InvalidArgument(
"Expected only one fetch node but there were ", item.fetch.size(), ": ",
absl::StrJoin(item.fetch, ", "));
}
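  // Respect an explicit user setting: if a max-intra-op-parallelism node is
  // already present, leave the graph unchanged.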
for (const NodeDef& node : item.graph.node()) {
for (const auto& target_dataset_op : kMaxIntraOpParallelismDatasetOps) {
if (node.op() == target_dataset_op) {
return absl::OkStatus();
}
}
}
NodeDef* sink_node = graph.GetNode(item.fetch.at(0));
NodeDef* last_node = graph_utils::GetInputNode(*sink_node, graph);
if (last_node->op() == kModelDataset) {
last_node = graph_utils::GetInputNode(*last_node, graph);
}
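  // Insert a scalar const 1 and a MaxIntraOpParallelismDataset that consumes
  // the current last dataset node.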
NodeDef* max_parallelism_value =
graph_utils::AddScalarConstNode(int64_t{1}, &graph);
NodeDef insert_node;
graph_utils::SetUniqueGraphNodeName("intra_op_parallelism", graph.graph(),
&insert_node);
insert_node.set_op(kMaxIntraOpParallelismDataset);
*insert_node.mutable_input()->Add() = last_node->name();
*insert_node.mutable_input()->Add() = max_parallelism_value->name();
if (!graph_utils::CopyShapesAndTypesAttrs(*last_node, &insert_node))
return absl::OkStatus();
auto* added_node = graph.AddNode(std::move(insert_node));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(last_node->name(), added_node->name()));
stats->num_changes++;
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(DisableIntraOpParallelism,
"disable_intra_op_parallelism");
}
} | #include "tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::NDef;
class IntraOpAlreadySetTest
: public ::testing::TestWithParam<std::tuple<string, int64_t>> {};
TEST_P(IntraOpAlreadySetTest, IntraOpParallelism) {
const string op = std::get<0>(GetParam());
const int64_t value = std::get<1>(GetParam());
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_val = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_val = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_val = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_val->name();
range_inputs[1] = stop_val->name();
range_inputs[2] = step_val->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("range", "RangeDataset",
range_inputs, range_attrs, &graph);
NodeDef *parallelism_val =
graph_utils::AddScalarConstNode<int64_t>(value, &graph);
std::vector<string> parallelism_inputs(2);
parallelism_inputs[0] = range_node->name();
parallelism_inputs[1] = parallelism_val->name();
std::vector<std::pair<string, AttrValue>> parallelism_attrs;
NodeDef *parallelism_node = graph_utils::AddNode(
"max_parallelism", op, parallelism_inputs, parallelism_attrs, &graph);
std::vector<string> sink_inputs(1);
sink_inputs[0] = parallelism_node->name();
std::vector<std::pair<string, AttrValue>> sink_attrs;
NodeDef *sink_node =
graph_utils::AddNode("Sink", "Identity", sink_inputs, sink_attrs, &graph);
item.fetch.push_back(sink_node->name());
EXPECT_TRUE(graph_utils::ContainsNodeWithOp(op, item.graph));
EXPECT_EQ(item.graph.node_size(), 7);
EXPECT_EQ(parallelism_val->attr().at("value").tensor().int64_val(0), value);
DisableIntraOpParallelism optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 7);
EXPECT_TRUE(graph_utils::ContainsNodeWithOp(op, output));
NodeDef new_parallelism_node =
output.node(graph_utils::FindGraphNodeWithOp(op, output));
NodeDef new_parallelism_val = output.node(graph_utils::FindGraphNodeWithName(
new_parallelism_node.input(1), output));
EXPECT_EQ(new_parallelism_val.attr().at("value").tensor().int64_val(0),
value);
}
INSTANTIATE_TEST_SUITE_P(
Test, IntraOpAlreadySetTest,
::testing::Combine(
::testing::Values("MaxIntraOpParallelismDataset",
"ExperimentalMaxIntraOpParallelismDataset"),
::testing::Values(1, 5)));
class IntraOpNotSetTest : public ::testing::TestWithParam<string> {};
TEST_P(IntraOpNotSetTest, IntraOpParallelism) {
const string op = GetParam();
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", op, {"range"}, {})});
EXPECT_FALSE(graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset",
item.graph));
EXPECT_EQ(item.graph.node_size(), 5);
item.fetch.push_back("Sink_fake");
DisableIntraOpParallelism optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset", output));
EXPECT_EQ(output.node_size(), 5);
item.fetch[0] = "Sink";
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
if (op == "_Retval") {
EXPECT_FALSE(graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset",
output));
EXPECT_EQ(output.node_size(), 5);
return;
}
EXPECT_EQ(output.node_size(), 7);
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset", output));
NodeDef sink_node =
output.node(graph_utils::FindGraphNodeWithName("Sink", output));
EXPECT_EQ(sink_node.input_size(), 1);
NodeDef parallelism_node = output.node(
graph_utils::FindGraphNodeWithName(sink_node.input(0), output));
EXPECT_EQ(parallelism_node.op(), "MaxIntraOpParallelismDataset");
EXPECT_EQ(parallelism_node.input_size(), 2);
NodeDef range_node = output.node(
graph_utils::FindGraphNodeWithName(parallelism_node.input(0), output));
EXPECT_EQ(range_node.name(), "range");
NodeDef parallelism_val = output.node(
graph_utils::FindGraphNodeWithName(parallelism_node.input(1), output));
EXPECT_EQ(parallelism_val.attr().at("value").tensor().int64_val(0), 1);
}
INSTANTIATE_TEST_SUITE_P(Test, IntraOpNotSetTest,
::testing::Values("Identity", "_Retval"));
TEST(AutotuneWithModelTest, IntraOpParallelism) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("model", "ModelDataset", {"range"}, {}),
NDef("Sink", "Identity", {"model"}, {})});
EXPECT_FALSE(graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset",
item.graph));
EXPECT_EQ(item.graph.node_size(), 6);
item.fetch.push_back("Sink");
DisableIntraOpParallelism optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 8);
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset", output));
NodeDef sink_node =
output.node(graph_utils::FindGraphNodeWithName("Sink", output));
EXPECT_EQ(sink_node.input_size(), 1);
NodeDef model_node = output.node(
graph_utils::FindGraphNodeWithName(sink_node.input(0), output));
EXPECT_EQ(model_node.op(), "ModelDataset");
EXPECT_EQ(model_node.input_size(), 1);
NodeDef parallelism_node = output.node(
graph_utils::FindGraphNodeWithName(model_node.input(0), output));
EXPECT_EQ(parallelism_node.op(), "MaxIntraOpParallelismDataset");
EXPECT_EQ(parallelism_node.input_size(), 2);
NodeDef range_node = output.node(
graph_utils::FindGraphNodeWithName(parallelism_node.input(0), output));
EXPECT_EQ(range_node.name(), "range");
NodeDef parallelism_val = output.node(
graph_utils::FindGraphNodeWithName(parallelism_node.input(1), output));
EXPECT_EQ(parallelism_val.attr().at("value").tensor().int64_val(0), 1);
}
}
}
} |
1,425 | cpp | tensorflow/tensorflow | function_utils | tensorflow/core/grappler/optimizers/data/function_utils.cc | tensorflow/core/grappler/optimizers/data/function_utils_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_FUNCTION_UTILS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_FUNCTION_UTILS_H_
#include <functional>
#include <memory>
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
class AttrSlice;
class Graph;
class GraphDef;
class NameAttrList;
class Node;
class NodeDef;
class OpDef;
string DebugString(const Graph* g);
void DumpGraph(StringPiece label, const Graph* g);
void ToGraphDef(const Graph* g, GraphDef* gdef, bool pretty = false);
Status NameAndAttrsFromFunctionCall(const NodeDef& call_def,
NameAttrList* function);
bool RemoveDeadNodes(Graph* g);
bool RemoveIdentityNodes(Graph* g);
bool RemoveListArrayConverter(Graph* g);
Status InstantiateFunctionCall(const NodeDef& call_def,
FunctionLibraryRuntime* flr,
FunctionLibraryRuntime::Handle* handle);
bool IsFunctionCall(const FunctionLibraryDefinition& lib_def, const Node& n);
}
#endif
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
static constexpr const char* const kNodeLabel = "Func";
struct Endpoint {
Node* node;
int index;
string name() const {
if (index == 0) {
return node->name();
} else {
return strings::StrCat(node->name(), ":", index);
}
}
DataType dtype() const { return node->output_type(index); }
};
static Node* AddNoOp(StringPiece name, Graph* g) {
NodeDef ndef;
ndef.set_name(g->NewName(absl::StrCat(kNodeLabel, "/", name)));
ndef.set_op("NoOp");
Status s;
Node* ret = g->AddNode(ndef, &s);
TF_CHECK_OK(s);
return ret;
}
static Node* AddIdentity(StringPiece name, Graph* g, Endpoint input) {
DCHECK_LT(0, input.dtype());
NodeDef ndef;
ndef.set_name(g->NewName(absl::StrCat(kNodeLabel, "/", name)));
ndef.set_op("Identity");
ndef.add_input(input.name());
AddNodeAttr("T", BaseType(input.dtype()), &ndef);
Status s;
Node* ret = g->AddNode(ndef, &s);
TF_CHECK_OK(s);
g->AddEdge(input.node, input.index, ret, 0);
return ret;
}
void DumpGraph(StringPiece label, const Graph* g) {
VLOG(2) << "Graph " << label << " #nodes " << g->num_nodes() << " #edges "
<< g->num_edges();
if (VLOG_IS_ON(5)) {
for (const auto& line : str_util::Split(DebugString(g), '\n')) {
VLOG(5) << "|| " << line;
}
}
}
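// Prunes nodes that are not reverse-reachable from source, sink, control-flow,
// or stateful nodes; returns true if any node was removed.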
bool RemoveDeadNodes(Graph* g) {
VLOG(2) << "Removing dead nodes";
std::unordered_set<const Node*> nodes;
for (auto n : g->nodes()) {
if (n->IsSource() || n->IsSink() || n->IsControlFlow() ||
n->op_def().is_stateful()) {
nodes.insert(n);
}
}
return PruneForReverseReachability(g, std::move(nodes));
}
namespace {
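// Returns the single data (non-control) input edge in `edges` if there is
// exactly one and it is safe to forward (not a ref type and not produced by a
// Recv or Switch); otherwise returns nullptr.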
const Edge* GetTheOnlyDataEdge(const EdgeSet& edges) {
const Edge* ret = nullptr;
for (const Edge* e : edges) {
if (e->IsControlEdge() || ret) {
return nullptr;
}
if (IsRefType(e->src()->output_type(e->src_output()))) {
return nullptr;
}
if (IsRecv(e->src()) || IsSwitch(e->src())) {
return nullptr;
}
ret = e;
}
return ret;
}
}
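// Bypasses Identity nodes that have exactly one forwardable data input,
// rewiring their data and control outputs to that input's source.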
bool RemoveIdentityNodes(Graph* g) {
VLOG(2) << "Removing identity nodes";
bool removed_any = false;
gtl::InlinedVector<Node*, 8> matches;
for (Node* n : g->nodes()) {
if (!n->IsIdentity()) continue;
if (!GetTheOnlyDataEdge(n->in_edges())) continue;
if (n->out_edges().empty()) continue;
matches.push_back(n);
}
if (!matches.empty()) {
for (Node* n : matches) {
const Edge* in = GetTheOnlyDataEdge(n->in_edges());
for (const Edge* out : n->out_edges()) {
if (out->IsControlEdge()) {
g->AddControlEdge(in->src(), out->dst());
} else {
g->AddEdge(in->src(), in->src_output(), out->dst(), out->dst_input());
}
}
VLOG(2) << "Remove Identity: " << n->DebugString();
g->RemoveNode(n);
removed_any = true;
}
}
return removed_any;
}
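// Replaces _ListToArray and _ArrayToList nodes with one Identity node per
// element, using NoOp nodes to preserve incoming and outgoing control
// dependencies.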
bool RemoveListArrayConverter(Graph* g) {
VLOG(2) << "Removing list array converter";
gtl::InlinedVector<Node*, 8> matches;
for (Node* n : g->nodes()) {
if ((n->type_string() == "_ListToArray") ||
(n->type_string() == "_ArrayToList")) {
matches.push_back(n);
}
}
bool removed_any = false;
if (!matches.empty()) {
for (Node* n : matches) {
if (n->num_inputs() != n->num_outputs()) {
continue;
}
gtl::InlinedVector<Node*, 8> identity_nodes(n->num_inputs(), nullptr);
const auto no_op = [&](StringPiece name) -> Node* {
return AddNoOp(absl::StrCat(n->name(), "/", name), g);
};
const auto identity = [&](StringPiece name, Endpoint input) -> Node* {
Node* node = AddIdentity(absl::StrCat(n->name(), "/", name), g, input);
node->set_requested_device(input.node->def().device());
return node;
};
Node* input_control_node = nullptr;
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
if (input_control_node == nullptr) {
input_control_node = no_op("input_control_node");
}
g->AddControlEdge(e->src(), input_control_node);
} else {
const int index = e->dst_input();
Node** id_node = &identity_nodes[index];
if (*id_node != nullptr) {
LOG(ERROR)
<< "RemoveListArrayConverter unexpected duplicated input: "
<< e->dst_input();
return removed_any;
}
*id_node = identity("input", {e->src(), e->src_output()});
}
}
if (input_control_node != nullptr) {
for (Node* id : identity_nodes) {
g->AddControlEdge(input_control_node, id);
}
}
Node* output_control_node = nullptr;
for (const Edge* e : n->out_edges()) {
if (e->IsControlEdge()) {
if (output_control_node == nullptr) {
output_control_node = no_op("output_control_node");
}
g->AddControlEdge(output_control_node, e->dst());
} else {
Node* id_node = identity_nodes[e->src_output()];
if (id_node == nullptr) {
LOG(ERROR) << "RemoveListArrayConverter unexpected missing input: "
<< e->src_output();
return removed_any;
}
CHECK(id_node);
g->AddEdge(id_node, 0, e->dst(), e->dst_input());
}
}
if (output_control_node != nullptr) {
for (Node* id : identity_nodes) {
g->AddControlEdge(id, output_control_node);
}
}
g->RemoveNode(n);
removed_any = true;
}
}
return removed_any;
}
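// Extracts the function name and attributes from a function call node:
// (Stateful)PartitionedCall ops carry them in the "f" attr, while legacy call
// nodes use the op name and the node's own attributes.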
Status NameAndAttrsFromFunctionCall(const NodeDef& call_def,
NameAttrList* function) {
if (call_def.op() == "PartitionedCall" ||
call_def.op() == "StatefulPartitionedCall") {
TF_RETURN_IF_ERROR(GetNodeAttr(call_def, "f", function));
} else {
function->set_name(call_def.op());
*function->mutable_attr() = call_def.attr();
}
return absl::OkStatus();
}
Status InstantiateFunctionCall(const NodeDef& call_def,
FunctionLibraryRuntime* flr,
FunctionLibraryRuntime::Handle* handle) {
NameAttrList function;
TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(call_def, &function));
return flr->Instantiate(function.name(), AttrSlice(&function.attr()), handle);
}
bool IsFunctionCall(const FunctionLibraryDefinition& lib_def,
const Node& node) {
return node.IsFunctionCall();
}
string NewName(const Node* n, bool pretty) {
if (pretty) {
return strings::StrCat(n->type_string(), n->id());
} else {
return strings::StrCat("n", n->id());
}
}
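// Serializes `g` into `gdef`, visiting ops in reverse DFS order starting from
// nodes without out-edges; `pretty` makes generated node names embed the op
// type rather than just the node id.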
void ToGraphDef(const Graph* g, GraphDef* gdef, bool pretty) {
gtl::InlinedVector<const Edge*, 4> inputs;
gdef->Clear();
*gdef->mutable_versions() = g->versions();
std::vector<Node*> start_nodes;
for (Node* n : g->nodes()) {
if (n->out_edges().empty()) {
start_nodes.push_back(n);
}
}
ReverseDFSFrom(*g, start_nodes, nullptr, [gdef, pretty, &inputs](Node* n) {
if (!n->IsOp()) return;
NodeDef* ndef = gdef->add_node();
ndef->set_name(NewName(n, pretty));
ndef->set_op(n->type_string());
for (const auto& attr : n->attrs()) {
(*ndef->mutable_attr())[attr.first] = attr.second;
}
if (!n->assigned_device_name().empty()) {
ndef->set_device(n->assigned_device_name());
} else {
ndef->set_device(n->requested_device());
}
inputs.clear();
inputs.resize(n->num_inputs());
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
inputs.push_back(e);
} else {
if (inputs[e->dst_input()] == nullptr) {
inputs[e->dst_input()] = e;
} else {
LOG(WARNING) << "Malformed graph node. multiple input edges: "
<< n->DebugString();
}
}
}
for (const Edge* e : inputs) {
if (e == nullptr) {
ndef->add_input("unknown");
continue;
}
const string srcname = NewName(e->src(), pretty);
if (!e->src()->IsOp()) {
} else if (e->IsControlEdge()) {
ndef->add_input(strings::StrCat("^", srcname));
} else if (e->src_output() == 0) {
ndef->add_input(srcname);
} else {
ndef->add_input(strings::StrCat(srcname, ":", e->src_output()));
}
}
});
}
string DebugString(const Graph* g) {
GraphDef gdef;
ToGraphDef(g, &gdef);
return DebugString(gdef);
}
} | #include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace grappler {
namespace function_utils {
namespace {
TEST(FunctionDefTensorDesc, Parsing) {
FunctionDefTensorDesc f("Cast:y:0");
EXPECT_EQ(f.full_str, "Cast:y:0");
EXPECT_EQ(f.node_name, "Cast");
EXPECT_EQ(f.node_output, "y");
EXPECT_EQ(f.position, 0);
FunctionDefTensorDesc f2("Arg0");
EXPECT_EQ(f2.full_str, "Arg0");
EXPECT_EQ(f2.node_name, "Arg0");
EXPECT_EQ(f2.node_output, "");
EXPECT_EQ(f2.position, -1);
}
TEST(ReplaceReferencesTest, ReplaceReferencesTest) {
FunctionDef outer = FunctionDefHelper::Create(
"outer", {"arg0: int32"}, {"out: int32", "out2: int64"}, {}, {},
{{"out", "MapDefun:output:0"}, {"out2", "Cast:y:0"}});
NodeDef* derive_node =
AddNode("X", "Some_Op", {"MapDefun:output:0"}, {}, &outer);
ReplaceReferences("MapDefun:output:0", "arg0", &outer);
EXPECT_EQ(outer.ret().at("out"), "arg0");
EXPECT_EQ(derive_node->input(0), "arg0");
}
TEST(FunctionUtilsTest, AddFunctionOutputWithUniqueName) {
FunctionDef function = test::function::XTimesTwo();
AddFunctionOutputWithUniqueName("y", "two", &function, DT_INT64);
EXPECT_TRUE(ContainsFunctionOutputWithName("y/_1", function));
EXPECT_EQ(function.ret().at("y/_1"), "two");
}
TEST(FunctionUtilsTest, AddFunctionInput) {
FunctionDef fdef;
auto arg0 = AddFunctionInput("arg0", &fdef, DT_INT32);
auto arg1 = AddFunctionInput("arg1", &fdef, DT_BOOL);
EXPECT_EQ(fdef.signature().input_arg().data()[0], arg0);
EXPECT_EQ(arg0->name(), "arg0");
EXPECT_EQ(arg0->type(), DT_INT32);
EXPECT_EQ(fdef.signature().input_arg().data()[1], arg1);
EXPECT_EQ(arg1->name(), "arg1");
EXPECT_EQ(arg1->type(), DT_BOOL);
}
TEST(FunctionUtilsTest, ContainsFunctionNodeWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_FALSE(ContainsFunctionNodeWithName(
"weird_name_that_should_not_be_there", function));
EXPECT_TRUE(ContainsFunctionNodeWithName("two", function));
}
TEST(FunctionUtilsTest, ContainsFunctionNodeWithOp) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_FALSE(ContainsFunctionNodeWithOp("weird_op_that_should_not_be_there",
function));
EXPECT_TRUE(ContainsFunctionNodeWithOp("Mul", function));
}
TEST(FunctionUtilsTest, ContainsFunctionOutputWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_TRUE(ContainsFunctionOutputWithName("y", function));
EXPECT_FALSE(ContainsFunctionOutputWithName("Add:z:0", function));
}
TEST(FunctionUtilsTest, FindFunctionNodeWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_EQ(
FindFunctionNodeWithName("weird_name_that_should_not_be_there", function),
-1);
EXPECT_NE(FindFunctionNodeWithName("two", function), -1);
}
TEST(FunctionUtilsTest, FindFunctionNodeWithOp) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_EQ(
FindFunctionNodeWithOp("weird_op_that_should_not_be_there", function),
-1);
EXPECT_NE(FindFunctionNodeWithOp("Mul", function), -1);
}
TEST(FunctionUtilsTest, FindFunctionInputWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_EQ(FindFunctionInputWithName("x", function), 0);
EXPECT_EQ(FindFunctionInputWithName("not_a_name", function), -1);
}
TEST(FunctionUtilsTest, FindFunctionOutputWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_EQ(FindFunctionOutputWithName("y", function), 0);
EXPECT_EQ(FindFunctionOutputWithName("Add:z:0", function), -1);
}
TEST(FunctionUtilsTest, SetUniqueFunctionNodeName) {
FunctionDef function = test::function::XTimesTwo();
NodeDef node;
SetUniqueFunctionNodeName("abc", &function, &node);
for (const NodeDef& function_node : function.node_def()) {
EXPECT_NE(node.name(), function_node.name());
}
auto* new_node = function.add_node_def();
*new_node = node;
NodeDef other;
SetUniqueFunctionNodeName("abc", &function, &other);
EXPECT_NE(other.name(), new_node->name());
}
TEST(FunctionUtilsTest, AddNodeToFunctionDef) {
FunctionDef func;
const char* op_name = "xxx";
AddNode(op_name, op_name, {}, {}, &func);
const NodeDef& node1 = func.node_def(FindFunctionNodeWithName("xxx", func));
EXPECT_EQ(node1.op(), op_name);
EXPECT_EQ(node1.input_size(), 0);
EXPECT_EQ(node1.attr_size(), 0);
const std::vector<string> inputs({"input1", "input2"});
AddNode("", op_name, inputs, {}, &func);
const NodeDef& node2 =
func.node_def(FindFunctionNodeWithName("xxx/_2", func));
EXPECT_EQ(node2.op(), op_name);
EXPECT_EQ(node2.attr_size(), 0);
EXPECT_EQ(node2.input_size(), inputs.size());
for (size_t i = 0; i < inputs.size(); ++i) {
EXPECT_EQ(node2.input(i), inputs[i]);
}
AttrValue a1, a2;
a1.set_type(DT_INT32);
a2.set_type(DT_INT64);
const std::vector<std::pair<string, AttrValue>> attrs(
{{"attr1", a1}, {"attr2", a2}});
AddNode("", op_name, {}, attrs, &func);
const NodeDef& node3 =
func.node_def(FindFunctionNodeWithName("xxx/_3", func));
EXPECT_EQ(node3.op(), op_name);
EXPECT_EQ(node3.input_size(), 0);
EXPECT_EQ(node3.attr_size(), attrs.size());
for (size_t i = 0; i < attrs.size(); ++i) {
EXPECT_EQ(attrs[i].second.type(), node3.attr().at(attrs[i].first).type());
}
}
constexpr char kCondGraphProto[] = R"proto(
node {
name: "StatefulPartitionedCall"
op: "StatefulPartitionedCall"
attr {
key: "Tin"
value { list {} }
}
attr {
key: "Tout"
value { list { type: DT_BOOL } }
}
attr {
key: "_gradient_op_type"
value { s: "PartitionedCall-20" }
}
attr {
key: "config"
value { s: "" }
}
attr {
key: "config_proto"
value { s: "" }
}
attr {
key: "executor_type"
value { s: "" }
}
attr {
key: "f"
value { func { name: "__inference_test_function_19" } }
}
}
library {
function {
signature {
name: "cond_true_3"
input_arg { name: "identity_const" type: DT_BOOL }
output_arg { name: "identity_1" type: DT_BOOL }
}
node_def { name: "NoOp" op: "NoOp" }
node_def {
name: "Identity"
op: "Identity"
input: "identity_const"
input: "^NoOp"
attr {
key: "T"
value { type: DT_BOOL }
}
}
node_def {
name: "Identity_1"
op: "Identity"
input: "Identity:output:0"
attr {
key: "T"
value { type: DT_BOOL }
}
}
ret { key: "identity_1" value: "Identity_1:output:0" }
}
function {
signature {
name: "cond_false_4"
input_arg { name: "identity_const" type: DT_BOOL }
output_arg { name: "identity_1" type: DT_BOOL }
is_stateful: true
}
node_def {
name: "Assert/Const"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: "Wrong branch!!!"
}
}
}
}
node_def {
name: "Assert/Assert/condition"
op: "Const"
attr {
key: "dtype"
value { type: DT_BOOL }
}
attr {
key: "value"
value {
tensor {
dtype: DT_BOOL
tensor_shape {}
bool_val: false
}
}
}
}
node_def {
name: "Assert/Assert/data_0"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: "Wrong branch!!!"
}
}
}
}
node_def {
name: "Assert/Assert"
op: "Assert"
input: "Assert/Assert/condition:output:0"
input: "Assert/Assert/data_0:output:0"
attr {
key: "T"
value { list { type: DT_STRING } }
}
attr {
key: "summarize"
value { i: 3 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "identity_const"
input: "^Assert/Assert"
attr {
key: "T"
value { type: DT_BOOL }
}
}
node_def {
name: "Identity_1"
op: "Identity"
input: "Identity:output:0"
input: "^Assert/Assert"
attr {
key: "T"
value { type: DT_BOOL }
}
}
ret { key: "identity_1" value: "Identity_1:output:0" }
}
function {
signature {
name: "__inference_test_function_19"
output_arg { name: "identity" type: DT_BOOL }
is_stateful: true
}
node_def {
name: "Const"
op: "Const"
attr {
key: "dtype"
value { type: DT_BOOL }
}
attr {
key: "value"
value {
tensor {
dtype: DT_BOOL
tensor_shape {}
bool_val: true
}
}
}
}
node_def {
name: "cond"
op: "If"
input: "Const:output:0"
input: "Const:output:0"
attr {
key: "Tcond"
value { type: DT_BOOL }
}
attr {
key: "Tin"
value { list { type: DT_BOOL } }
}
attr {
key: "Tout"
value { list { type: DT_BOOL } }
}
attr {
key: "_lower_using_switch_merge"
value { b: true }
}
attr {
key: "else_branch"
value { func { name: "cond_false_4" } }
}
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "then_branch"
value { func { name: "cond_true_3" } }
}
}
node_def {
name: "cond/Identity"
op: "Identity"
input: "cond:output:0"
attr {
key: "T"
value { type: DT_BOOL }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "cond/Identity:output:0"
input: "^cond"
attr {
key: "T"
value { type: DT_BOOL }
}
}
ret { key: "identity" value: "Identity:output:0" }
}
}
versions { producer: 27 min_consumer: 12 })proto";
constexpr char kWhileGraphProto[] = R"proto(
node {
name: "StatefulPartitionedCall"
op: "StatefulPartitionedCall"
attr {
key: "Tin"
value { list {} }
}
attr {
key: "Tout"
value { list { type: DT_INT32 } }
}
attr {
key: "_gradient_op_type"
value { s: "PartitionedCall-35" }
}
attr {
key: "config"
value { s: "" }
}
attr {
key: "config_proto"
value { s: "" }
}
attr {
key: "executor_type"
value { s: "" }
}
attr {
key: "f"
value { func { name: "__inference_test_function_34" } }
}
}
library {
function {
signature {
name: "while_body_5"
input_arg { name: "while_loop_counter" type: DT_INT32 }
input_arg { name: "const" type: DT_INT32 }
input_arg { name: "maximum_iterations" type: DT_INT32 }
output_arg { name: "identity" type: DT_INT32 }
output_arg { name: "identity_1" type: DT_INT32 }
output_arg { name: "identity_2" type: DT_INT32 }
}
node_def {
name: "add/y"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 1
}
}
}
}
node_def {
name: "add"
op: "Add"
input: "const"
input: "add/y:output:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "add_1/y"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 1
}
}
}
}
node_def {
name: "add_1"
op: "Add"
input: "while_loop_counter"
input: "add_1/y:output:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "add_1:z:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Identity_1"
op: "Identity"
input: "add:z:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Identity_2"
op: "Identity"
input: "maximum_iterations"
attr {
key: "T"
value { type: DT_INT32 }
}
}
ret { key: "identity" value: "Identity:output:0" }
ret { key: "identity_1" value: "Identity_1:output:0" }
ret { key: "identity_2" value: "Identity_2:output:0" }
}
function {
signature {
name: "__inference_test_function_34"
output_arg { name: "identity" type: DT_INT32 }
is_stateful: true
}
node_def {
name: "maximum_iterations"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 1
}
}
}
}
node_def {
name: "Const"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 0
}
}
}
}
node_def {
name: "while/loop_counter"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 0
}
}
}
}
node_def {
name: "while"
op: "While"
input: "while/loop_counter:output:0"
input: "Const:output:0"
input: "maximum_iterations:output:0"
attr {
key: "T"
value { list { type: DT_INT32 type: DT_INT32 type: DT_INT32 } }
}
attr {
key: "_lower_using_switch_merge"
value { b: true }
}
attr {
key: "body"
value { func { name: "while_body_5" } }
}
attr {
key: "cond"
value { func { name: "while_cond_4" } }
}
attr {
key: "output_shapes"
value {
list {
shape {}
shape {}
shape {}
}
}
}
}
node_def {
name: "while/Identity"
op: "Identity"
input: "while:output:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "while/Identity_1"
op: "Identity"
input: "while:output:1"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "while/Identity_2"
op: "Identity"
input: "while:output:2"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "while/Identity_1:output:0"
input: "^while"
attr {
key: "T"
value { type: DT_INT32 }
}
}
ret { key: "identity" value: "Identity:output:0" }
}
function {
signature {
name: "while_cond_4"
input_arg { name: "while_loop_counter" type: DT_INT32 }
input_arg { name: "const" type: DT_INT32 }
input_arg { name: "less_maximum_iterations" type: DT_INT32 }
output_arg { name: "identity" type: DT_BOOL }
}
node_def {
name: "Less"
op: "Less"
input: "while_loop_counter"
input: "less_maximum_iterations"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Less_1/y"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 3
}
}
}
}
node_def {
name: "Less_1"
op: "Less"
input: "const"
input: "Less_1/y:output:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "LogicalAnd"
op: "LogicalAnd"
input: "Less:z:0"
input: "Less_1:z:0"
}
node_def {
name: "Identity"
op: "Identity"
input: "LogicalAnd:z:0"
attr {
key: "T"
value { type: DT_BOOL }
}
}
ret { key: "identity" value: "Identity:output:0" }
}
}
versions { producer: 27 min_consumer: 12 })proto";
TEST(FunctionUtilsTest, IsFunctionStateful) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* nodeA = graph_utils::AddNode("", "A", {}, {}, &graph);
FunctionDef* function = graph_def.mutable_library()->add_function();
*function = test::function::XTimesTwo();
FunctionLibraryDefinition lib_def(OpRegistry::Global(),
*graph_def.mutable_library());
EXPECT_FALSE(IsFunctionStateful(lib_def, *function));
EXPECT_TRUE(IsNodeStateful(lib_def, *nodeA));
GraphDef graph_def_cond;
protobuf::TextFormat::ParseFromString(kCondGraphProto, &graph_def_cond);
FunctionLibraryDefinition cond_lib(OpRegistry::Global(),
graph_def_cond.library());
const FunctionDef* no_op_fnc = cond_lib.Find("cond_true_3");
EXPECT_FALSE(IsFunctionStateful(cond_lib, *no_op_fnc));
EXPECT_FALSE(IsFunctionStateful(cond_lib, *no_op_fnc, true));
const FunctionDef* assert_func = cond_lib.Find("cond_false_4");
EXPECT_TRUE(IsFunctionStateful(cond_lib, *assert_func));
EXPECT_FALSE(IsFunctionStateful(cond_lib, *assert_func, true));
EXPECT_TRUE(ContainsFunctionNodeWithOp("Const", *assert_func));
EXPECT_TRUE(ContainsFunctionNodeWithOp("Assert", *assert_func));
for (auto node : assert_func->node_def()) {
if (node.op() == "Const") {
EXPECT_FALSE(IsNodeStateful(lib_def, node));
}
if (node.op() == "Assert") {
EXPECT_TRUE(IsNodeStateful(lib_def, node));
EXPECT_FALSE(IsNodeStateful(lib_def, node, true));
}
}
const FunctionDef* cond_func = cond_lib.Find("__inference_test_function_19");
EXPECT_TRUE(IsFunctionStateful(cond_lib, *cond_func));
EXPECT_FALSE(IsFunctionStateful(cond_lib, *cond_func, true));
GraphDef graph_def_while;
protobuf::TextFormat::ParseFromString(kWhileGraphProto, &graph_def_while);
FunctionLibraryDefinition while_lib(OpRegistry::Global(),
graph_def_while.library());
const FunctionDef* while_function =
while_lib.Find("__inference_test_function_34");
EXPECT_FALSE(IsFunctionStateful(while_lib, *while_function));
EXPECT_FALSE(IsFunctionStateful(while_lib, *while_function, true));
}
}
}
}
} |
1,426 | cpp | tensorflow/tensorflow | replicate_on_split | tensorflow/core/grappler/optimizers/data/replicate_on_split.cc | tensorflow/core/grappler/optimizers/data/replicate_on_split_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_REPLICATE_ON_SPLIT_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_REPLICATE_ON_SPLIT_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
class ReplicateOnSplit : public TFDataOptimizerBase {
public:
ReplicateOnSplit() = default;
~ReplicateOnSplit() override = default;
string name() const override { return "replicate_on_split"; }
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/replicate_on_split.h"
#include "absl/log/log.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
namespace tensorflow {
namespace grappler {
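// Sets the `replicate_on_split` attribute to true on every node whose op
// supports it.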
Status ReplicateOnSplit::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
VLOG(1) << "Running replicate on split optimization";
*output = item.graph;
MutableGraphView graph(output);
for (NodeDef& node : *output->mutable_node()) {
if (graph_utils::HasReplicateOnSplitAttr(node.op())) {
(*node.mutable_attr())["replicate_on_split"].set_b(true);
stats->num_changes++;
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(ReplicateOnSplit, "replicate_on_split");
}
} | #include "tensorflow/core/grappler/optimizers/data/replicate_on_split.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(ReplicateOnSplit, TensorSliceDataset) {
using test::function::NDef;
GrapplerItem item;
Tensor tensor = test::AsTensor<int32>({32, 32});
item.graph = test::function::GDef(
{NDef("tensor", "Const", {}, {{"value", tensor}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeTensorSliceNode("tensor_slice_dataset", "tensor",
false)});
ReplicateOnSplit optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(
graph_utils::ContainsGraphNodeWithName("tensor_slice_dataset", output));
int index =
graph_utils::FindGraphNodeWithName("tensor_slice_dataset", output);
EXPECT_TRUE(output.node(index).attr().at("replicate_on_split").b());
}
}
}
} |
1,427 | cpp | tensorflow/tensorflow | use_private_thread_pool | tensorflow/core/grappler/optimizers/data/use_private_thread_pool.cc | tensorflow/core/grappler/optimizers/data/use_private_thread_pool_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_USE_PRIVATE_THREAD_POOL_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_USE_PRIVATE_THREAD_POOL_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
class UsePrivateThreadPool : public TFDataOptimizerBase {
public:
UsePrivateThreadPool() = default;
~UsePrivateThreadPool() override = default;
string name() const override { return "use_private_thread_pool"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/use_private_thread_pool.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kPrivateThreadPoolDataset[] = "PrivateThreadPoolDataset";
constexpr char kModelDataset[] = "ModelDataset";
}
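// Gives the pipeline its own thread pool by inserting a
// PrivateThreadPoolDataset (with a thread count of 0) before the fetch node or
// a trailing ModelDataset, unless such a node is already present.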
Status UsePrivateThreadPool::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
if (item.fetch.size() != 1) {
return errors::InvalidArgument(
"Expected only one fetch node but there were ", item.fetch.size(), ": ",
absl::StrJoin(item.fetch, ", "));
}
for (const NodeDef& node : item.graph.node()) {
if (node.op() == kPrivateThreadPoolDataset) {
return absl::OkStatus();
}
}
NodeDef* sink_node = graph.GetNode(item.fetch.at(0));
NodeDef* last_node = graph_utils::GetInputNode(*sink_node, graph);
if (last_node->op() == kModelDataset) {
last_node = graph_utils::GetInputNode(*last_node, graph);
}
NodeDef* num_threads_value =
graph_utils::AddScalarConstNode(int64_t{0}, &graph);
NodeDef insert_node;
graph_utils::SetUniqueGraphNodeName("private_thread_pool", graph.graph(),
&insert_node);
insert_node.set_op(kPrivateThreadPoolDataset);
*insert_node.mutable_input()->Add() = last_node->name();
*insert_node.mutable_input()->Add() = num_threads_value->name();
if (!graph_utils::CopyShapesAndTypesAttrs(*last_node, &insert_node))
return absl::OkStatus();
auto* added_node = graph.AddNode(std::move(insert_node));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(last_node->name(), added_node->name()));
stats->num_changes++;
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(UsePrivateThreadPool, "use_private_thread_pool");
}
} | #include "tensorflow/core/grappler/optimizers/data/use_private_thread_pool.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::NDef;
class ThreadPoolOpAlreadySetTest : public ::testing::TestWithParam<int64_t> {};
TEST_P(ThreadPoolOpAlreadySetTest, PrivateThreadPool) {
const int64_t num_of_threads = GetParam();
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_val = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_val = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_val = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_val->name();
range_inputs[1] = stop_val->name();
range_inputs[2] = step_val->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("range", "RangeDataset",
range_inputs, range_attrs, &graph);
NodeDef *num_of_threads_val =
graph_utils::AddScalarConstNode<int64_t>(num_of_threads, &graph);
std::vector<string> private_threads_inputs(2);
private_threads_inputs[0] = range_node->name();
private_threads_inputs[1] = num_of_threads_val->name();
std::vector<std::pair<string, AttrValue>> private_threads_attrs;
NodeDef *private_threads_node = graph_utils::AddNode(
"private_thread_pool", "PrivateThreadPoolDataset", private_threads_inputs,
private_threads_attrs, &graph);
std::vector<string> sink_inputs(1);
sink_inputs[0] = private_threads_node->name();
std::vector<std::pair<string, AttrValue>> sink_attrs;
NodeDef *sink_node =
graph_utils::AddNode("Sink", "Identity", sink_inputs, sink_attrs, &graph);
item.fetch.push_back(sink_node->name());
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", item.graph));
EXPECT_EQ(item.graph.node_size(), 7);
EXPECT_EQ(num_of_threads_val->attr().at("value").tensor().int64_val(0),
num_of_threads);
UsePrivateThreadPool optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 7);
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", output));
NodeDef new_private_threads_node = output.node(
graph_utils::FindGraphNodeWithOp("PrivateThreadPoolDataset", output));
NodeDef new_num_of_threads_val =
output.node(graph_utils::FindGraphNodeWithName(
new_private_threads_node.input(1), output));
EXPECT_EQ(new_num_of_threads_val.attr().at("value").tensor().int64_val(0),
num_of_threads);
}
INSTANTIATE_TEST_SUITE_P(Test, ThreadPoolOpAlreadySetTest,
::testing::Values(1, 2, 4));
class ThreadPoolOpNotSetTest : public ::testing::TestWithParam<string> {};
TEST_P(ThreadPoolOpNotSetTest, PrivateThreadPool) {
const string op = GetParam();
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", op, {"range"}, {})});
EXPECT_FALSE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", item.graph));
EXPECT_EQ(item.graph.node_size(), 5);
item.fetch.push_back("Sink_fake");
UsePrivateThreadPool optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", output));
EXPECT_EQ(output.node_size(), 5);
item.fetch[0] = "Sink";
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
if (op == "_Retval") {
EXPECT_FALSE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", output));
EXPECT_EQ(output.node_size(), 5);
return;
}
EXPECT_EQ(output.node_size(), 7);
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", output));
NodeDef sink_node =
output.node(graph_utils::FindGraphNodeWithName("Sink", output));
EXPECT_EQ(sink_node.input_size(), 1);
NodeDef private_threads_node = output.node(
graph_utils::FindGraphNodeWithName(sink_node.input(0), output));
EXPECT_EQ(private_threads_node.op(), "PrivateThreadPoolDataset");
EXPECT_EQ(private_threads_node.input_size(), 2);
NodeDef range_node = output.node(graph_utils::FindGraphNodeWithName(
private_threads_node.input(0), output));
EXPECT_EQ(range_node.name(), "range");
NodeDef num_of_threads_val = output.node(graph_utils::FindGraphNodeWithName(
private_threads_node.input(1), output));
EXPECT_EQ(num_of_threads_val.attr().at("value").tensor().int64_val(0), 0);
}
INSTANTIATE_TEST_SUITE_P(Test, ThreadPoolOpNotSetTest,
::testing::Values("Identity", "_Retval"));
TEST(AutotuneWithModelTest, PrivateThreadPool) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("model", "ModelDataset", {"range"}, {}),
NDef("Sink", "Identity", {"model"}, {})});
EXPECT_FALSE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", item.graph));
EXPECT_EQ(item.graph.node_size(), 6);
item.fetch.push_back("Sink");
UsePrivateThreadPool optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 8);
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", output));
NodeDef sink_node =
output.node(graph_utils::FindGraphNodeWithName("Sink", output));
EXPECT_EQ(sink_node.input_size(), 1);
NodeDef model_node = output.node(
graph_utils::FindGraphNodeWithName(sink_node.input(0), output));
EXPECT_EQ(model_node.op(), "ModelDataset");
EXPECT_EQ(model_node.input_size(), 1);
NodeDef private_threads_node = output.node(
graph_utils::FindGraphNodeWithName(model_node.input(0), output));
EXPECT_EQ(private_threads_node.op(), "PrivateThreadPoolDataset");
EXPECT_EQ(private_threads_node.input_size(), 2);
NodeDef range_node = output.node(graph_utils::FindGraphNodeWithName(
private_threads_node.input(0), output));
EXPECT_EQ(range_node.name(), "range");
NodeDef num_of_threads_val = output.node(graph_utils::FindGraphNodeWithName(
private_threads_node.input(1), output));
EXPECT_EQ(num_of_threads_val.attr().at("value").tensor().int64_val(0), 0);
}
}
}
} |
1,428 | cpp | tensorflow/tensorflow | fusion_utils | tensorflow/core/grappler/optimizers/data/fusion_utils.cc | tensorflow/core/grappler/optimizers/data/fusion_utils_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FUSION_UTILS_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FUSION_UTILS_H_
#include <functional>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace fusion_utils {
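// Customization points for FuseFunctions: how the fused signature is built,
// how each of the second function's inputs is wired, how the fused return map
// is assembled, and how the two function bodies are merged.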
using SetFunctionSignatureFn = std::function<void(
const OpDef& first_function_signature,
const OpDef& second_function_signature, OpDef* fused_function_signature)>;
using StringCollection = gtl::InlinedVector<string, 2>;
using SetInputFn =
std::function<string(const StringCollection& first_function_inputs,
const StringCollection& second_function_inputs,
const StringCollection& parent_outputs, int arg_num)>;
using SetOutputFn =
std::function<void(const protobuf::Map<string, string>& parent_ret,
const protobuf::Map<string, string>& second_function_ret,
protobuf::Map<string, string>* fused_ret)>;
using SetNodesFn = std::function<void(
const FunctionDef& first_function, const FunctionDef& second_function,
FunctionDef* fused_function, FunctionDefLibrary* library)>;
void MergeNodes(const FunctionDef& first_function,
const FunctionDef& second_function, FunctionDef* fused_function,
FunctionDefLibrary* library);
bool CanCompose(const OpDef& first_signature, const OpDef& second_signature);
void ComposeSignature(const OpDef& first_signature,
const OpDef& second_signature, OpDef* fused_signature);
string ComposeInput(const StringCollection& first_inputs,
const StringCollection& second_inputs,
const StringCollection& first_outputs, int arg_num);
void ComposeOutput(const protobuf::Map<string, string>& first_ret,
const protobuf::Map<string, string>& second_ret,
protobuf::Map<string, string>* fused_ret);
void CombineSignature(const OpDef& first_signature,
const OpDef& second_signature, OpDef* fused_signature);
void CombineOutput(const protobuf::Map<string, string>& first_ret,
const protobuf::Map<string, string>& second_ret,
protobuf::Map<string, string>* fused_ret);
bool HasSameSignature(const OpDef& first_signature,
const OpDef& second_signature);
void SameSignature(const OpDef& first_signature, const OpDef& second_signature,
OpDef* fused_signature);
string SameInput(const StringCollection& first_inputs,
const StringCollection& second_inputs,
const StringCollection& first_outputs, int arg_num);
void LazyConjunctionOutput(const protobuf::Map<string, string>& first_ret,
const protobuf::Map<string, string>& second_ret,
protobuf::Map<string, string>* fused_ret);
void LazyConjunctionNodes(const FunctionDef& first_function,
const FunctionDef& second_function,
FunctionDef* fused_function,
FunctionDefLibrary* library);
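// Fuses `second_function` with `first_function` according to the supplied
// signature, input, output and node-merging callbacks, adds the fused function
// to `library`, and returns a pointer to it.
//
// Minimal composition-style usage sketch (illustrative only; `parent_fn`,
// `child_fn` and `library` are assumed to come from the graph being
// optimized):
//
//   FunctionDef* fused = fusion_utils::FuseFunctions(
//       parent_fn, child_fn, "fused_map",
//       fusion_utils::ComposeSignature, fusion_utils::ComposeInput,
//       fusion_utils::ComposeOutput, fusion_utils::MergeNodes, library);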
FunctionDef* FuseFunctions(
const FunctionDef& first_function, const FunctionDef& second_function,
StringPiece fused_name_prefix, const SetFunctionSignatureFn& set_signature,
const SetInputFn& set_input, const SetOutputFn& set_output,
const SetNodesFn& set_nodes, FunctionDefLibrary* library);
}
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
namespace fusion_utils {
namespace {
constexpr char kControlInputPrefix[] = "^";
bool IsControlInput(const string& node_input) {
return absl::StartsWith(node_input, kControlInputPrefix);
}
string StripControlInputNotation(const string& node_input) {
return string(absl::StripPrefix(node_input, kControlInputPrefix));
}
string AddControlInputNotation(const string& node_input) {
return absl::StrCat(kControlInputPrefix, node_input);
}
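// Helpers for a function tensor reference such as "node:output:0":
// ParseNodeConnection returns the node name ("node") and ParseOutputNode
// returns the output suffix (":output:0", or "" if there is none).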
string ParseNodeConnection(const string& name) {
return name.substr(0, name.find(':'));
}
string ParseOutputNode(const string& name) {
if (name.find(':') == string::npos) return {};
return name.substr(name.find(':'), string::npos);
}
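// Returns the tensor reference bound to the function's `output_idx`-th return
// value.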
string GetOutputNode(const FunctionDef& function, int output_idx) {
const auto& ret_output_name =
function.signature().output_arg(output_idx).name();
return function.ret().at(ret_output_name);
}
string& GetMutableOutputNode(FunctionDef* function, int output_idx) {
const auto& ret_output_name =
function->signature().output_arg(output_idx).name();
return function->mutable_ret()->at(ret_output_name);
}
template <typename Iterable>
StringCollection GetNames(const Iterable& iterable, int allocate_size) {
StringCollection names;
names.reserve(allocate_size);
for (auto& arg : iterable) names.push_back(arg.name());
return names;
}
template <typename Iterable>
gtl::FlatSet<string> GetNodeNamesSet(const Iterable& nodes) {
gtl::FlatSet<string> names;
for (const auto& node : nodes) {
CHECK(gtl::InsertIfNotPresent(&names, node.name()))
<< "Functions should have unique node names. Node with name "
<< node.name() << " already exists";
}
return names;
}
template <typename Iterable>
gtl::FlatMap<string, string> GetUniqueNames(const Iterable& first_iterable,
const Iterable& second_iterable) {
gtl::FlatMap<string, string> changed_node_names;
const auto first_names = GetNodeNamesSet(first_iterable);
auto second_names = GetNodeNamesSet(first_iterable);
int id = second_iterable.size();
for (const auto& node : second_iterable) {
string name_before = node.name();
string name = name_before;
bool changed_name = false;
while (first_names.count(name) ||
(changed_name && second_names.count(name))) {
name = strings::StrCat(name_before, "/_", id);
changed_name = true;
++id;
}
if (changed_name) {
changed_node_names[name_before] = name;
second_names.insert(std::move(name));
}
}
return changed_node_names;
}
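// Illustration (not part of the original logic): GetUniqueNames only records a
// mapping for names in `second_iterable` that collide with `first_iterable`.
// If both functions contain a node named "y" and the second function has three
// nodes, the colliding copy is remapped to a candidate such as "y/_3" -- the
// counter starts at second_iterable.size() and increments until the candidate
// is free in both name sets.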
void RenameFunctionNodes(
const FunctionDef& first_function,
protobuf::RepeatedPtrField<NodeDef>* nodes_to_fuse,
protobuf::Map<string, string>* rets_to_fuse,
protobuf::Map<string, string>* control_rets_to_fuse,
protobuf::RepeatedPtrField<string>* control_outputs_to_fuse) {
const gtl::FlatMap<string, string> changed_node_names =
GetUniqueNames(first_function.node_def(), *nodes_to_fuse);
auto updated_name = [&changed_node_names](const string& input) {
string input_node = ParseNodeConnection(input);
auto iter = changed_node_names.find(input_node);
if (iter != changed_node_names.end()) {
return iter->second + ParseOutputNode(input);
}
return input;
};
for (NodeDef& function_node : *nodes_to_fuse) {
if (const string* new_name =
gtl::FindOrNull(changed_node_names, function_node.name())) {
function_node.set_name(*new_name);
}
for (string& input : *function_node.mutable_input()) {
input = updated_name(input);
}
}
for (auto& [unused, ret_node] : *rets_to_fuse) {
ret_node = updated_name(ret_node);
}
protobuf::Map<string, string> new_control_rets_to_fuse;
protobuf::RepeatedPtrField<string> new_control_outputs_to_fuse;
for (const auto& [unused, control_ret_node] : *control_rets_to_fuse) {
string updated_control_ret_node = updated_name(control_ret_node);
new_control_rets_to_fuse.insert(
{updated_control_ret_node, updated_control_ret_node});
*new_control_outputs_to_fuse.Add() = updated_control_ret_node;
}
*control_rets_to_fuse = new_control_rets_to_fuse;
*control_outputs_to_fuse = new_control_outputs_to_fuse;
}
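// Note that after renaming, control return values are re-keyed by the node name
// itself, so the fused control_ret map ends up in the form {name -> name} and
// the signature's control_output list is rebuilt to match (the
// FuseFunctionWithControlOutputs test case below relies on exactly that shape).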
StringCollection GetFunctionInputs(const FunctionDef& function) {
return GetNames(function.signature().input_arg(),
function.signature().input_arg_size());
}
OpDef GetUniqueSignature(const OpDef& first_signature,
const OpDef& second_signature,
protobuf::Map<string, string>* rets_to_fuse,
protobuf::Map<string, string>* control_rets_to_fuse,
protobuf::RepeatedPtrField<NodeDef>* nodes_to_fuse) {
const gtl::FlatMap<string, string> changed_input_names =
GetUniqueNames(first_signature.input_arg(), second_signature.input_arg());
OpDef signature;
signature.set_name(second_signature.name());
for (const auto& input_arg : second_signature.input_arg()) {
auto& input = *signature.add_input_arg();
input = input_arg;
if (const string* new_name =
gtl::FindOrNull(changed_input_names, input.name())) {
input.set_name(*new_name);
}
}
const gtl::FlatMap<string, string> changed_output_names = GetUniqueNames(
first_signature.output_arg(), second_signature.output_arg());
for (const auto& output_arg : second_signature.output_arg()) {
auto& output = *signature.add_output_arg();
output = output_arg;
if (const string* new_name =
gtl::FindOrNull(changed_output_names, output.name())) {
output.set_name(*new_name);
}
}
auto new_rets = [&](const protobuf::Map<string, string>& old_rets) {
protobuf::Map<string, string> new_rets;
for (const auto& ret : old_rets) {
const auto& key = changed_output_names.count(ret.first)
? changed_output_names.at(ret.first)
: ret.first;
const auto& input = ParseNodeConnection(ret.second);
const auto& value =
changed_input_names.count(input)
? changed_input_names.at(input) + ParseOutputNode(ret.second)
: ret.second;
new_rets[key] = value;
}
return new_rets;
};
*rets_to_fuse = new_rets(*rets_to_fuse);
*control_rets_to_fuse = new_rets(*control_rets_to_fuse);
for (NodeDef& function_node : *nodes_to_fuse) {
for (auto& node_input : *function_node.mutable_input()) {
bool is_control_input = IsControlInput(node_input);
const auto& input =
ParseNodeConnection(StripControlInputNotation(node_input));
if (const string* new_name =
gtl::FindOrNull(changed_input_names, input)) {
node_input = *new_name + ParseOutputNode(node_input);
if (is_control_input) {
node_input = AddControlInputNotation(node_input);
}
}
}
}
if (second_signature.is_stateful()) {
signature.set_is_stateful(true);
}
return signature;
}
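// Illustration: if both signatures declare an input argument "x", the second
// function's copy is renamed (e.g. to "x/_1") and every reference to it -- in
// the ret map, the control_ret map, and the node inputs, including "^x" control
// inputs -- is rewritten to the new name, so the two signatures can coexist in
// one fused function.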
void FuseFunctionNodes(const StringCollection& first_inputs,
const StringCollection& second_inputs,
const StringCollection& first_outputs,
const SetInputFn& set_input,
protobuf::RepeatedPtrField<NodeDef>* nodes_to_fuse) {
for (NodeDef& function_node : *nodes_to_fuse) {
for (auto& node_input : *function_node.mutable_input()) {
bool is_control_input = IsControlInput(node_input);
auto parsed_name =
ParseNodeConnection(StripControlInputNotation(node_input));
auto input_it =
std::find(second_inputs.begin(), second_inputs.end(), parsed_name);
if (input_it == second_inputs.end()) continue;
auto arg_num = std::distance(second_inputs.begin(), input_it);
node_input =
set_input(first_inputs, second_inputs, first_outputs, arg_num);
if (is_control_input) {
node_input = AddControlInputNotation(node_input);
}
}
}
}
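// In other words, any node input that names the second function's i-th input
// argument is replaced by whatever set_input returns for argument i. With
// ComposeInput (defined below) that is the first function's i-th output, which
// is how the second function's body gets wired onto the first function's
// results; a leading "^" is preserved so control dependencies stay control
// dependencies.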
void FuseReturns(const StringCollection& first_inputs,
const StringCollection& second_inputs,
const StringCollection& first_outputs,
const SetInputFn& set_input,
protobuf::Map<string, string>* fused_ret) {
for (auto& ret : *fused_ret) {
auto return_input = ParseNodeConnection(ret.second);
auto input_it =
std::find(second_inputs.begin(), second_inputs.end(), return_input);
if (input_it == second_inputs.end()) continue;
auto input_idx = std::distance(second_inputs.begin(), input_it);
ret.second =
set_input(first_inputs, second_inputs, first_outputs, input_idx);
}
}
StringCollection GetFunctionOutputs(const FunctionDef& function) {
const auto number_of_outputs = function.signature().output_arg_size();
StringCollection outputs;
outputs.reserve(number_of_outputs);
for (int output_idx = 0; output_idx < number_of_outputs; output_idx++)
outputs.push_back(GetOutputNode(function, output_idx));
return outputs;
}
FunctionDef* CreateFalsePredicate(
const protobuf::RepeatedPtrField<OpDef_ArgDef>& fake_args,
FunctionDefLibrary* library) {
GraphDef graph;
MutableGraphView graph_view(&graph);
auto* node = graph_utils::AddScalarConstNode(false, &graph_view);
auto* false_predicate = library->add_function();
graph_utils::SetUniqueGraphFunctionName("false_predicate", library,
false_predicate);
int num = 0;
for (const auto& fake_arg : fake_args) {
auto* arg = false_predicate->mutable_signature()->add_input_arg();
arg->set_type(fake_arg.type());
arg->set_name(strings::StrCat("fake_arg", num));
num++;
}
auto* output = false_predicate->mutable_signature()->add_output_arg();
output->set_name("false_out");
output->set_type(DT_BOOL);
(*false_predicate->mutable_ret())["false_out"] = node->name() + ":output:0";
*false_predicate->mutable_node_def() = std::move(*graph.mutable_node());
return false_predicate;
}
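// CreateFalsePredicate builds a helper predicate that ignores its (fake)
// arguments and always returns a scalar false. It is used as the else_branch of
// the "If" node emitted by LazyConjunctionNodes below, so that when the first
// predicate is already false the second one is never evaluated -- i.e. the
// fused filter behaves like a short-circuiting logical AND.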
void CheckIfCanCompose(const OpDef& first_signature,
const OpDef& second_signature) {
CHECK(CanCompose(first_signature, second_signature))
<< "The number of input arguments of function " << second_signature.name()
<< " should be the same as the number of output arguments of function "
<< first_signature.name() << ".";
}
}
void MergeNodes(const FunctionDef& first_function,
const FunctionDef& second_function, FunctionDef* fused_function,
FunctionDefLibrary* library) {
fused_function->mutable_node_def()->CopyFrom(first_function.node_def());
fused_function->mutable_node_def()->MergeFrom(second_function.node_def());
}
bool CanCompose(const OpDef& first_signature, const OpDef& second_signature) {
return first_signature.output_arg_size() == second_signature.input_arg_size();
}
string ComposeInput(const StringCollection& first_inputs,
const StringCollection& second_inputs,
const StringCollection& first_outputs, int arg_num) {
return first_outputs.at(arg_num);
}
void ComposeSignature(const OpDef& first_signature,
const OpDef& second_signature, OpDef* fused_signature) {
CheckIfCanCompose(first_signature, second_signature);
*fused_signature->mutable_input_arg() = first_signature.input_arg();
*fused_signature->mutable_output_arg() = second_signature.output_arg();
if (first_signature.is_stateful() || second_signature.is_stateful()) {
if (!(first_signature.is_stateful() && second_signature.is_stateful())) {
metrics::RecordTFDataDebug("fused_with_mixed_statefulness");
}
fused_signature->set_is_stateful(true);
}
fused_signature->mutable_control_output()->Add(
first_signature.control_output().begin(),
first_signature.control_output().end());
fused_signature->mutable_control_output()->Add(
second_signature.control_output().begin(),
second_signature.control_output().end());
}
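// The composed signature therefore describes second(first(x)): it takes the
// first function's inputs and produces the second function's outputs, is
// stateful if either operand is, and concatenates both control output lists.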
void ComposeOutput(const protobuf::Map<string, string>& first_ret,
const protobuf::Map<string, string>& second_ret,
protobuf::Map<string, string>* fused_ret) {
*fused_ret = second_ret;
}
void CombineSignature(const OpDef& first_signature,
const OpDef& second_signature, OpDef* fused_signature) {
CheckIfCanCompose(first_signature, second_signature);
*fused_signature = first_signature;
fused_signature->mutable_output_arg()->MergeFrom(
second_signature.output_arg());
}
void CombineOutput(const protobuf::Map<string, string>& first_ret,
const protobuf::Map<string, string>& second_ret,
protobuf::Map<string, string>* fused_ret) {
*fused_ret = first_ret;
fused_ret->insert(second_ret.begin(), second_ret.end());
}
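// CombineSignature keeps the first function's full signature and appends the
// second function's output arguments; CombineOutput merges both ret maps the
// same way. This is the shape needed for map+filter fusion, where the fused
// function must return both the mapped values and the filter predicate (see the
// "fused_map_and_filter_function" test case in the unit test).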
string SameInput(const StringCollection& first_inputs,
const StringCollection& second_inputs,
const StringCollection& first_outputs, int arg_num) {
return first_inputs.at(arg_num);
}
bool HasSameSignature(const OpDef& first_signature,
const OpDef& second_signature) {
return first_signature.input_arg_size() ==
second_signature.input_arg_size() &&
first_signature.output_arg_size() ==
second_signature.output_arg_size();
}
void SameSignature(const OpDef& first_signature, const OpDef& second_signature,
OpDef* fused_signature) {
CHECK(HasSameSignature(first_signature, second_signature))
<< "Functions do not have the same signature";
*fused_signature = first_signature;
}
void LazyConjunctionNodes(const FunctionDef& first_function,
const FunctionDef& second_function,
FunctionDef* fused_function,
FunctionDefLibrary* library) {
fused_function->mutable_node_def()->CopyFrom(first_function.node_def());
NodeDefBuilder if_builder("", "If");
if_builder.Input(GetOutputNode(first_function, 0), 0, DT_BOOL);
DataTypeVector in_arg_types;
std::vector<NodeDefBuilder::NodeOut> inputs;
for (const auto& input_arg : first_function.signature().input_arg()) {
inputs.push_back({input_arg.name(), 0, input_arg.type()});
in_arg_types.push_back(input_arg.type());
}
if_builder.Attr("Tin", in_arg_types);
if_builder.Attr("Tcond", DT_BOOL);
if_builder.Attr("Tout", DataTypeVector{DT_BOOL});
if_builder.Attr("_lower_using_switch_merge", true);
NameAttrList then_branch;
then_branch.set_name(second_function.signature().name());
if_builder.Attr("then_branch", then_branch);
auto* false_predicate =
CreateFalsePredicate(first_function.signature().input_arg(), library);
NameAttrList else_branch;
else_branch.set_name(false_predicate->signature().name());
if_builder.Attr("else_branch", else_branch);
if_builder.Input(inputs);
auto* if_node = fused_function->add_node_def();
TF_CHECK_OK(if_builder.Finalize(if_node));
function_utils::SetUniqueFunctionNodeName("cond", fused_function, if_node);
GetMutableOutputNode(fused_function, 0) = if_node->name() + ":output:0";
}
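// A rough sketch of what this produces, assuming two single-output boolean
// predicates p1 and p2:
//
//   out = If(p1(x), then_branch = p2, else_branch = always_false)(x)
//
// The first function's nodes are copied verbatim, the If node is lowered via
// switch/merge (_lower_using_switch_merge = true), and the fused function's
// single return value is rebound to the If node's output.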
void LazyConjunctionOutput(const protobuf::Map<string, string>& first_ret,
const protobuf::Map<string, string>& second_ret,
protobuf::Map<string, string>* fused_ret) {
CHECK_EQ(first_ret.size(), 1);
CHECK_EQ(second_ret.size(), 1);
*fused_ret = first_ret;
}
FunctionDef* FuseFunctions(
const FunctionDef& first_function, const FunctionDef& second_function,
StringPiece fused_name_prefix, const SetFunctionSignatureFn& set_signature,
const SetInputFn& set_input, const SetOutputFn& set_output,
const SetNodesFn& set_nodes, FunctionDefLibrary* library) {
auto has_unknown_attrs = [](const FunctionDef& func) {
int known_attribute_size = 0;
if (data::IsTFDataFunction(func)) known_attribute_size += 1;
if (func.attr().contains("_construction_context"))
known_attribute_size += 1;
return func.attr_size() > known_attribute_size;
};
if (has_unknown_attrs(first_function) || has_unknown_attrs(second_function)) {
return nullptr;
}
FunctionDef setup_function = second_function;
*setup_function.mutable_signature() = GetUniqueSignature(
first_function.signature(), setup_function.signature(),
setup_function.mutable_ret(), setup_function.mutable_control_ret(),
setup_function.mutable_node_def());
FunctionDef* fused_function = library->add_function();
RenameFunctionNodes(
first_function, setup_function.mutable_node_def(),
setup_function.mutable_ret(), setup_function.mutable_control_ret(),
setup_function.mutable_signature()->mutable_control_output());
set_output(first_function.ret(), setup_function.ret(),
fused_function->mutable_ret());
CombineOutput(first_function.control_ret(), setup_function.control_ret(),
fused_function->mutable_control_ret());
set_signature(first_function.signature(), setup_function.signature(),
fused_function->mutable_signature());
graph_utils::SetUniqueGraphFunctionName(fused_name_prefix, library,
fused_function);
CHECK(fused_function->signature().output_arg_size() ==
fused_function->ret_size())
<< "Fused function must have the same number of returns as output "
"args. Output size: "
<< fused_function->signature().output_arg_size()
<< ", ret size: " << fused_function->ret_size();
const auto first_inputs = GetFunctionInputs(first_function);
const auto second_inputs = GetFunctionInputs(setup_function);
const auto first_outputs = GetFunctionOutputs(first_function);
FuseFunctionNodes(first_inputs, second_inputs, first_outputs, set_input,
setup_function.mutable_node_def());
FuseReturns(first_inputs, second_inputs, first_outputs, set_input,
fused_function->mutable_ret());
set_nodes(first_function, setup_function, fused_function, library);
(*fused_function->mutable_attr())[data::kTFDataFunction].set_b(true);
auto get_construction_context = [](const FunctionDef& func) {
auto iter = func.attr().find("_construction_context");
if (iter == func.attr().cend()) return std::string();
return iter->second.s();
};
std::string first_construction_context =
get_construction_context(first_function);
std::string second_construction_context =
get_construction_context(second_function);
if (first_construction_context != second_construction_context) {
LOG(ERROR) << "_construction_context attribute mismatch during fused "
"function optimization pass. First function: "
<< first_construction_context
<< " Second function: " << first_construction_context;
}
if (!first_construction_context.empty()) {
(*fused_function->mutable_attr())["_construction_context"].set_s(
first_construction_context);
}
return fused_function;
}
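// A minimal usage sketch (mirroring the unit tests; the variable names are
// illustrative only): to fuse two map functions f and g into g(f(x)), call
//
//   FunctionDef* fused = FuseFunctions(f, g, "fused_maps",
//                                      ComposeSignature, ComposeInput,
//                                      ComposeOutput, MergeNodes, &library);
//
// A null result means one of the inputs carried attributes this pass does not
// understand, in which case the caller should simply skip the fusion.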
}
}
} | #include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
namespace fusion_utils {
namespace {
string ParseNodeConnection(const string& name) {
return name.substr(0, name.find(':'));
}
void CheckUniqueNames(const FunctionDef& function) {
std::unordered_set<string> inputs;
for (const auto& input_arg : function.signature().input_arg())
inputs.insert(input_arg.name());
EXPECT_EQ(inputs.size(), function.signature().input_arg_size());
std::unordered_set<string> outputs;
for (const auto& output_arg : function.signature().output_arg())
outputs.insert(output_arg.name());
EXPECT_EQ(outputs.size(), function.signature().output_arg_size());
std::unordered_set<string> nodes;
for (const auto& node : function.node_def()) nodes.insert(node.name());
EXPECT_EQ(nodes.size(), function.node_def_size());
}
TEST(FusionUtilsTest, FuseFunctionsByComposition) {
GraphDef graph;
auto *parent_function = graph.mutable_library()->add_function();
*parent_function = test::function::XTimesTwo();
auto *function = graph.mutable_library()->add_function();
*function = test::function::XTimesTwo();
auto *fused_function = FuseFunctions(
*parent_function, *function, "fused_maps", fusion_utils::ComposeSignature,
fusion_utils::ComposeInput, fusion_utils::ComposeOutput,
fusion_utils::MergeNodes, graph.mutable_library());
EXPECT_EQ(fused_function->signature().name(), "fused_maps");
EXPECT_EQ(fused_function->signature().input_arg_size(), 1);
EXPECT_EQ(fused_function->signature().output_arg_size(), 1);
EXPECT_EQ(fused_function->ret_size(), 1);
std::cerr << fused_function->DebugString();
CheckUniqueNames(*fused_function);
const NodeDef *parent_mul = nullptr, *output_mul = nullptr;
for (const auto& fused_node : fused_function->node_def()) {
if (fused_node.op() == "Mul") {
if (fused_node.name() == "y")
parent_mul = &fused_node;
else
output_mul = &fused_node;
}
}
ASSERT_NE(parent_mul, nullptr);
ASSERT_NE(output_mul, nullptr);
EXPECT_EQ(ParseNodeConnection(output_mul->input(0)), parent_mul->name());
auto output_value = fused_function->ret().at(
fused_function->signature().output_arg(0).name());
EXPECT_EQ(ParseNodeConnection(output_value), output_mul->name());
}
TEST(FusionUtilsTest, FuseFunctionsWithControlInputs) {
GraphDef graph;
auto *parent_function = graph.mutable_library()->add_function();
*parent_function = test::function::XTimesTwoWithControlInput();
auto *function = graph.mutable_library()->add_function();
*function = test::function::XTimesTwoWithControlInput();
auto *fused_function = FuseFunctions(
*parent_function, *function, "fused_maps", fusion_utils::ComposeSignature,
fusion_utils::ComposeInput, fusion_utils::ComposeOutput,
fusion_utils::MergeNodes, graph.mutable_library());
EXPECT_EQ(fused_function->signature().name(), "fused_maps");
EXPECT_EQ(fused_function->signature().input_arg_size(), 1);
EXPECT_EQ(fused_function->signature().output_arg_size(), 1);
EXPECT_EQ(fused_function->ret_size(), 1);
CheckUniqueNames(*fused_function);
const NodeDef *parent_mul = nullptr, *output_mul = nullptr;
for (const auto& fused_node : fused_function->node_def()) {
if (fused_node.op() == "Mul") {
if (fused_node.name() == "y")
parent_mul = &fused_node;
else
output_mul = &fused_node;
}
}
ASSERT_NE(parent_mul, nullptr);
ASSERT_NE(output_mul, nullptr);
EXPECT_EQ(ParseNodeConnection(output_mul->input(1)),
absl::StrCat("^", parent_mul->name()));
auto output_value = fused_function->ret().at(
fused_function->signature().output_arg(0).name());
EXPECT_EQ(ParseNodeConnection(output_value), output_mul->name());
}
TEST(FusionUtilsTest, FuseFunctionWithControlOutputs) {
GraphDef graph;
auto *f1 = graph.mutable_library()->add_function();
*f1 = test::function::XTimesTwoWithControlOutput();
f1->mutable_signature()->set_name("f1");
auto *f2 = graph.mutable_library()->add_function();
*f2 = test::function::XTimesTwoWithControlOutput();
f2->mutable_signature()->set_name("f2");
auto *fused_function =
FuseFunctions(*f1, *f2, "fused_maps", fusion_utils::ComposeSignature,
fusion_utils::ComposeInput, fusion_utils::ComposeOutput,
fusion_utils::MergeNodes, graph.mutable_library());
EXPECT_EQ(fused_function->signature().control_output_size(), 2);
string control_output_1 = fused_function->signature().control_output(0);
string control_output_2 = fused_function->signature().control_output(1);
EXPECT_NE(control_output_1, control_output_2);
EXPECT_EQ(fused_function->control_ret_size(), 2);
EXPECT_TRUE(fused_function->control_ret().contains(control_output_1));
EXPECT_TRUE(fused_function->control_ret().contains(control_output_2));
EXPECT_EQ(fused_function->control_ret().at(control_output_1),
control_output_1);
EXPECT_EQ(fused_function->control_ret().at(control_output_2),
control_output_2);
}
struct StatefulnessTestCase {
bool is_stateful_a, is_stateful_b;
};
using FusionUtilsTest_Statefulness =
::testing::TestWithParam<StatefulnessTestCase>;
TEST_P(FusionUtilsTest_Statefulness, FuseFunctionStatefulness) {
const StatefulnessTestCase &test_case = GetParam();
GraphDef graph;
auto *parent_function = graph.mutable_library()->add_function();
*parent_function = test::function::XTimesTwo();
auto *function = graph.mutable_library()->add_function();
*function = test::function::XTimesTwo();
if (test_case.is_stateful_a) {
parent_function->mutable_signature()->set_is_stateful(true);
}
if (test_case.is_stateful_b) {
function->mutable_signature()->set_is_stateful(true);
}
auto *fused_function = FuseFunctions(
*parent_function, *function, "fused_maps", fusion_utils::ComposeSignature,
fusion_utils::ComposeInput, fusion_utils::ComposeOutput,
fusion_utils::MergeNodes, graph.mutable_library());
EXPECT_EQ(fused_function->signature().is_stateful(),
test_case.is_stateful_a || test_case.is_stateful_b);
}
INSTANTIATE_TEST_SUITE_P(
StatefulnessTests, FusionUtilsTest_Statefulness,
::testing::ValuesIn<StatefulnessTestCase>(
{{false, false}, {false, true}, {true, false}, {true, true}}));
TEST(FusionUtilsTest, FuseFunctionWithPredicate) {
GraphDef graph;
auto *xtimes_two = graph.mutable_library()->add_function();
*xtimes_two = test::function::XTimesTwo();
auto *is_zero = graph.mutable_library()->add_function();
*is_zero = test::function::IsZero();
auto *fused_function =
FuseFunctions(*xtimes_two, *is_zero, "fused_map_and_filter_function",
fusion_utils::CombineSignature, fusion_utils::ComposeInput,
fusion_utils::CombineOutput, fusion_utils::MergeNodes,
graph.mutable_library());
EXPECT_EQ(fused_function->signature().name(),
"fused_map_and_filter_function");
EXPECT_EQ(fused_function->signature().input_arg_size(), 1);
EXPECT_EQ(fused_function->signature().output_arg_size(), 2);
EXPECT_EQ(fused_function->ret_size(), 2);
CheckUniqueNames(*fused_function);
ASSERT_TRUE(
function_utils::ContainsFunctionNodeWithOp("Equal", *fused_function));
const auto& equal_node = fused_function->node_def(
function_utils::FindFunctionNodeWithOp("Equal", *fused_function));
EXPECT_EQ(xtimes_two->signature().output_arg(0).name(),
fused_function->signature().output_arg(0).name());
EXPECT_EQ(fused_function->signature().output_arg(1).name(),
equal_node.name());
EXPECT_EQ(ParseNodeConnection(equal_node.input(0)),
fused_function->signature().output_arg(0).name());
auto output_value = fused_function->ret().at(
fused_function->signature().output_arg(1).name());
EXPECT_EQ(ParseNodeConnection(output_value), equal_node.name());
}
TEST(FusionUtilsTest, FuseSameFunctionWithExtraOutput) {
GraphDef graph;
auto *parent_function = graph.mutable_library()->add_function();
*parent_function = test::function::XTimesTwo();
auto *function = graph.mutable_library()->add_function();
*function = test::function::XTimesTwo();
auto *fused_function = FuseFunctions(
*parent_function, *function, "fused_maps", fusion_utils::CombineSignature,
fusion_utils::ComposeInput, fusion_utils::CombineOutput,
fusion_utils::MergeNodes, graph.mutable_library());
EXPECT_EQ(fused_function->signature().input_arg_size(), 1);
EXPECT_EQ(fused_function->signature().output_arg_size(), 2);
EXPECT_EQ(fused_function->ret_size(), 2);
CheckUniqueNames(*fused_function);
}
TEST(FusionUtilsTest, ZipFusion) {
GraphDef graph;
auto *function = graph.mutable_library()->add_function();
*function = test::function::XTimesTwo();
auto zip_signature = [](const OpDef& parent_function_signature,
const OpDef& function_signature,
OpDef *fused_function_signature) {
*fused_function_signature = parent_function_signature;
fused_function_signature->mutable_input_arg()->MergeFrom(
function_signature.input_arg());
fused_function_signature->mutable_output_arg()->MergeFrom(
function_signature.output_arg());
};
auto zip_input = [](const StringCollection& parent_inputs,
const StringCollection& function_inputs,
const StringCollection& parent_outputs, int arg_num) {
return function_inputs.at(arg_num);
};
auto *fused_function =
FuseFunctions(*function, *function, "zip_maps", zip_signature, zip_input,
fusion_utils::CombineOutput, fusion_utils::MergeNodes,
graph.mutable_library());
EXPECT_EQ(fused_function->signature().input_arg_size(), 2);
EXPECT_EQ(fused_function->signature().output_arg_size(), 2);
EXPECT_EQ(fused_function->ret_size(), 2);
CheckUniqueNames(*fused_function);
}
}
}
}
} |
1,429 | cpp | tensorflow/tensorflow | noop_elimination | tensorflow/core/grappler/optimizers/data/noop_elimination.cc | tensorflow/core/grappler/optimizers/data/noop_elimination_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_NOOP_ELIMINATION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_NOOP_ELIMINATION_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
class NoOpElimination : public TFDataOptimizerBase {
public:
NoOpElimination() = default;
~NoOpElimination() override = default;
string name() const override { return "noop_elimination"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/noop_elimination.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kIdentity[] = "Identity";
bool IsTakeAll(const NodeDef& take_node, const MutableGraphView& graph) {
if (take_node.op() != "TakeDataset") return false;
const auto& count_node = *graph.GetNode(take_node.input(1));
if (count_node.op() != "Const") return false;
const auto& tensor = count_node.attr().at("value").tensor();
if (tensor.int64_val_size()) return tensor.int64_val(0) < 0;
return false;
}
bool IsConstNodeWithValue(const NodeDef& node, int value) {
if (node.op() != "Const") return false;
const auto& tensor = node.attr().at("value").tensor();
if (tensor.int64_val_size()) return tensor.int64_val(0) == value;
return value == 0;
}
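// Note the fallback: a Const node whose tensor proto carries no int64 values
// (e.g. an empty scalar) is treated as holding 0, so IsConstNodeWithValue(n, 0)
// can still match it. Non-Const inputs such as Placeholder never match, which
// is why datasets parameterized at run time are left untouched (see the
// DoNotRemovePlaceholders cases in the unit test).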
bool IsSkipNone(const NodeDef& skip_node, const MutableGraphView& graph) {
if (skip_node.op() != "SkipDataset") return false;
return IsConstNodeWithValue(*graph.GetNode(skip_node.input(1)), 0);
}
bool IsRepeatOne(const NodeDef& repeat_node, const MutableGraphView& graph) {
if (repeat_node.op() != "RepeatDataset") return false;
return IsConstNodeWithValue(*graph.GetNode(repeat_node.input(1)), 1);
}
bool IsShardOne(const NodeDef& shard_node, const MutableGraphView& graph) {
if (shard_node.op() != "ShardDataset") return false;
return IsConstNodeWithValue(*graph.GetNode(shard_node.input(1)), 1);
}
bool IsOutputIdentityOfInput(const FunctionDef& fdef, const string& output_arg,
const string& input_arg) {
if (!fdef.ret().contains(output_arg)) {
LOG(WARNING)
<< "Malformed FunctionDef: ret dict does not contain output arg key.";
return false;
}
const auto& ret_val = fdef.ret().at(output_arg);
auto input = function_utils::FunctionDefTensorDesc(ret_val);
while (function_utils::ContainsFunctionNodeWithName(input.node_name, fdef)) {
int idx = function_utils::FindFunctionNodeWithName(input.node_name, fdef);
const NodeDef& node = fdef.node_def(idx);
if (node.op() != kIdentity) {
return false;
}
input = function_utils::FunctionDefTensorDesc(node.input(0));
}
return input.node_name == input_arg;
}
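// This walks backwards through any chain of Identity nodes, e.g.
//   ret "y" -> Identity "id2" -> Identity "id1" -> input "x"
// is accepted, while hitting any non-Identity op (or a ret entry that is
// missing altogether) rejects the function as a candidate identity map.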
bool IsMapIdentity(const NodeDef& map_node, const MutableGraphView& graph,
const FunctionLibraryDefinition& function_library) {
if (map_node.op() != "MapDataset" && map_node.op() != "ParallelMapDataset" &&
map_node.op() != "ParallelMapDatasetV2") {
return false;
}
if (map_node.attr().at("Targuments").list().type_size() != 0) return false;
const FunctionDef* fdef =
function_library.Find(map_node.attr().at("f").func().name());
if (function_utils::IsFunctionStateful(function_library, *fdef)) {
return false;
}
const auto& sig = fdef->signature();
if (sig.input_arg_size() != sig.output_arg_size()) return false;
for (int i = 0; i < sig.input_arg_size(); ++i) {
if (!IsOutputIdentityOfInput(*fdef, sig.output_arg(i).name(),
sig.input_arg(i).name())) {
return false;
}
}
return true;
}
bool IsNoOp(const NodeDef& node, const MutableGraphView& graph,
const FunctionLibraryDefinition& function_library) {
return IsTakeAll(node, graph) || IsSkipNone(node, graph) ||
IsRepeatOne(node, graph) || IsShardOne(node, graph) ||
IsMapIdentity(node, graph, function_library);
}
}
Status NoOpElimination::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
graph.graph()->library());
for (const NodeDef& node : item.graph.node()) {
if (!IsNoOp(node, graph, function_library)) continue;
NodeDef* const parent = graph_utils::GetInputNode(node, graph);
TF_RETURN_IF_ERROR(graph.UpdateFanouts(node.name(), parent->name()));
nodes_to_delete.insert(node.name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
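// Each no-op dataset node is bypassed by rewiring its fanouts to its first
// input (the upstream dataset) and then deleting it, so e.g.
// range.take(-1).skip(0).repeat(1) collapses back to the plain range dataset.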
REGISTER_GRAPH_OPTIMIZER_AS(NoOpElimination, "noop_elimination");
}
} | #include "tensorflow/core/grappler/optimizers/data/noop_elimination.h"
#include <tuple>
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
std::vector<std::pair<string, AttrValue>> GetCommonAttributes() {
AttrValue shapes_attr, types_attr;
SetAttrValue("output_shapes", &shapes_attr);
SetAttrValue("output_types", &types_attr);
std::vector<std::pair<string, AttrValue>> commonAttributes = {
{"output_shapes", shapes_attr}, {"output_types", types_attr}};
return commonAttributes;
}
NodeDef *MakeNode(StringPiece node_type, std::vector<int> params,
string input_node, MutableGraphView *graph) {
std::vector<NodeDef *> node_params;
for (int param : params) {
node_params.push_back(
graph_utils::AddScalarConstNode<int64_t>(param, graph));
}
std::vector<string> inputs = {input_node};
for (int i = 0; i < node_params.size(); i++) {
inputs.push_back(node_params[i]->name());
}
return graph_utils::AddNode("", node_type, inputs, GetCommonAttributes(),
graph);
}
NodeDef *MakeNonConstNode(StringPiece node_type,
std::vector<DataType> param_dtypes, string input_node,
MutableGraphView *graph) {
std::vector<NodeDef *> node_params;
for (DataType dtype : param_dtypes) {
node_params.push_back(graph_utils::AddScalarPlaceholder(dtype, graph));
}
std::vector<string> inputs = {input_node};
for (int i = 0; i < node_params.size(); i++) {
inputs.push_back(node_params[i]->name());
}
return graph_utils::AddNode("", node_type, inputs, GetCommonAttributes(),
graph);
}
NodeDef *MakeCacheNode(string input_node, MutableGraphView *graph) {
NodeDef *node_filename =
graph_utils::AddScalarConstNode<StringPiece>("", graph);
return graph_utils::AddNode("", "CacheDataset",
{std::move(input_node), node_filename->name()},
GetCommonAttributes(), graph);
}
NodeDef *MakeRangeNode(MutableGraphView *graph) {
auto *start_node = graph_utils::AddScalarConstNode<int64_t>(0, graph);
auto *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, graph);
auto *step_node = graph_utils::AddScalarConstNode<int64_t>(1, graph);
std::vector<string> range_inputs = {start_node->name(), stop_node->name(),
step_node->name()};
return graph_utils::AddNode("", "RangeDataset", range_inputs,
GetCommonAttributes(), graph);
}
struct NoOpLastEliminationTest
: ::testing::TestWithParam<std::tuple<string, std::vector<int>, bool>> {};
TEST_P(NoOpLastEliminationTest, EliminateLastNoOpNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
const string &node_type = std::get<0>(GetParam());
const std::vector<int> node_params = std::get<1>(GetParam());
const bool should_keep_node = std::get<2>(GetParam());
NodeDef *range_node = MakeRangeNode(&graph);
NodeDef *node = MakeNode(node_type, node_params, range_node->name(), &graph);
NoOpElimination optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName(node->name(), output),
should_keep_node);
}
INSTANTIATE_TEST_CASE_P(
BasicRemovalTest, NoOpLastEliminationTest,
::testing::Values(
std::make_tuple("TakeDataset", std::vector<int>({-3}), false),
std::make_tuple("TakeDataset", std::vector<int>({-1}), false),
std::make_tuple("TakeDataset", std::vector<int>({0}), true),
std::make_tuple("TakeDataset", std::vector<int>({3}), true),
std::make_tuple("SkipDataset", std::vector<int>({-1}), true),
std::make_tuple("SkipDataset", std::vector<int>({0}), false),
std::make_tuple("SkipDataset", std::vector<int>({3}), true),
std::make_tuple("RepeatDataset", std::vector<int>({1}), false),
std::make_tuple("RepeatDataset", std::vector<int>({2}), true),
std::make_tuple("ShardDataset", std::vector<int>({1, 0}), false),
std::make_tuple("ShardDataset", std::vector<int>({2, 0}), true)));
struct NoOpMiddleEliminationTest
: ::testing::TestWithParam<std::tuple<string, std::vector<int>, bool>> {};
TEST_P(NoOpMiddleEliminationTest, EliminateMiddleNoOpNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
const string &node_type = std::get<0>(GetParam());
const std::vector<int> node_params = std::get<1>(GetParam());
const bool should_keep_node = std::get<2>(GetParam());
NodeDef *range_node = MakeRangeNode(&graph);
NodeDef *node = MakeNode(node_type, node_params, range_node->name(), &graph);
NodeDef *cache_node = MakeCacheNode(node->name(), &graph);
NoOpElimination optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName(node->name(), output),
should_keep_node);
EXPECT_TRUE(
graph_utils::ContainsGraphNodeWithName(cache_node->name(), output));
NodeDef cache_node_out = output.node(
graph_utils::FindGraphNodeWithName(cache_node->name(), output));
EXPECT_EQ(cache_node_out.input_size(), 2);
auto last_node_input = (should_keep_node ? node : range_node)->name();
EXPECT_EQ(cache_node_out.input(0), last_node_input);
}
INSTANTIATE_TEST_CASE_P(
BasicRemovalTest, NoOpMiddleEliminationTest,
::testing::Values(
std::make_tuple("TakeDataset", std::vector<int>({-1}), false),
std::make_tuple("TakeDataset", std::vector<int>({-3}), false),
std::make_tuple("TakeDataset", std::vector<int>({0}), true),
std::make_tuple("TakeDataset", std::vector<int>({3}), true),
std::make_tuple("SkipDataset", std::vector<int>({-1}), true),
std::make_tuple("SkipDataset", std::vector<int>({0}), false),
std::make_tuple("SkipDataset", std::vector<int>({3}), true),
std::make_tuple("RepeatDataset", std::vector<int>({1}), false),
std::make_tuple("RepeatDataset", std::vector<int>({2}), true),
std::make_tuple("ShardDataset", std::vector<int>({1, 0}), false),
std::make_tuple("ShardDataset", std::vector<int>({2, 0}), true)));
using NodesTypes = std::tuple<std::pair<string, std::vector<int>>,
std::pair<string, std::vector<int>>>;
struct NoOpMultipleEliminationTest : ::testing::TestWithParam<NodesTypes> {};
TEST_P(NoOpMultipleEliminationTest, EliminateMultipleNoOpNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
static_assert(std::tuple_size<NodesTypes>::value == 2,
"Make sure to include everything in the test");
const std::vector<std::pair<string, std::vector<int>>> noop_nodes = {
std::get<0>(GetParam()), std::get<1>(GetParam())};
NodeDef *range_node = MakeRangeNode(&graph);
NodeDef *previous = range_node;
std::vector<string> nodes_to_remove;
nodes_to_remove.reserve(noop_nodes.size());
for (const auto &noop_node : noop_nodes) {
NodeDef *node =
MakeNode(noop_node.first, noop_node.second, previous->name(), &graph);
nodes_to_remove.push_back(node->name());
previous = node;
}
NodeDef *cache_node = MakeCacheNode(previous->name(), &graph);
NoOpElimination optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
for (const auto &noop_node_name : nodes_to_remove)
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(noop_node_name, output));
EXPECT_TRUE(
graph_utils::ContainsGraphNodeWithName(cache_node->name(), output));
NodeDef cache_node_out = output.node(
graph_utils::FindGraphNodeWithName(cache_node->name(), output));
EXPECT_EQ(cache_node_out.input_size(), 2);
EXPECT_EQ(cache_node_out.input(0), range_node->name());
}
const auto *const kTakeNode =
new std::pair<string, std::vector<int>>{"TakeDataset", {-1}};
const auto *const kSkipNode =
new std::pair<string, std::vector<int>>{"SkipDataset", {0}};
const auto *const kRepeatNode =
new std::pair<string, std::vector<int>>{"RepeatDataset", {1}};
const auto *const kShardNode =
new std::pair<string, std::vector<int>>{"ShardDataset", {1, 0}};
INSTANTIATE_TEST_CASE_P(
BasicRemovalTest, NoOpMultipleEliminationTest,
::testing::Combine(
::testing::Values(*kTakeNode, *kSkipNode, *kRepeatNode, *kShardNode),
::testing::Values(*kTakeNode, *kSkipNode, *kRepeatNode, *kShardNode)));
struct NoOpPlaceholdersTest
: ::testing::TestWithParam<
std::tuple<std::pair<string, std::vector<DataType>>,
std::pair<string, std::vector<DataType>>>> {};
TEST_P(NoOpPlaceholdersTest, NonConstNoOpNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
static_assert(std::tuple_size<NodesTypes>::value == 2,
"Make sure to include everything in the test");
const std::vector<std::pair<string, std::vector<DataType>>> noop_nodes = {
std::get<0>(GetParam()), std::get<1>(GetParam())};
NodeDef *range_node = MakeRangeNode(&graph);
std::vector<string> nodes_to_keep;
nodes_to_keep.reserve(noop_nodes.size());
NodeDef *previous = range_node;
for (const auto &noop_node : noop_nodes) {
NodeDef *node = MakeNonConstNode(noop_node.first, noop_node.second,
previous->name(), &graph);
nodes_to_keep.push_back(node->name());
previous = node;
}
NoOpElimination optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
for (const auto &noop_node_name : nodes_to_keep)
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName(noop_node_name, output));
}
const auto *const kNonConstTakeNode =
new std::pair<string, std::vector<DataType>>{"TakeDataset", {DT_INT32}};
const auto *const kNonConstSkipNode =
new std::pair<string, std::vector<DataType>>{"SkipDataset", {DT_INT32}};
const auto *const kNonConstRepeatNode =
new std::pair<string, std::vector<DataType>>{"RepeatDataset", {DT_INT32}};
const auto *const kNonConstShardNode =
new std::pair<string, std::vector<DataType>>{"ShardDataset",
{DT_INT32, DT_INT32}};
INSTANTIATE_TEST_CASE_P(
DoNotRemovePlaceholders, NoOpPlaceholdersTest,
::testing::Combine(::testing::Values(*kNonConstTakeNode, *kNonConstSkipNode,
*kNonConstRepeatNode,
*kNonConstShardNode),
::testing::Values(*kNonConstTakeNode, *kNonConstSkipNode,
*kNonConstRepeatNode,
*kNonConstShardNode)));
}
}
} |
1,430 | cpp | tensorflow/tensorflow | graph | tensorflow/core/graph/graph.cc | tensorflow/core/graph/graph_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_GRAPH_H_
#define TENSORFLOW_CORE_GRAPH_GRAPH_H_
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "absl/types/optional.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/edgeset.h"
#include "tensorflow/core/lib/core/arena.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/iterator_range.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
class Edge;
class EdgeSetTest;
class Graph;
class GraphDebugInfo;
class GraphDef;
class GraphTest;
class Node;
struct OutputTensor;
class VersionDef;
class WhileContext;
class NeighborIter;
class NodeIter;
enum class ConstructionContext {
kNotTracked,
kDirectSession,
kEagerRuntime,
};
class Node {
public:
std::string DebugString() const;
int id() const { return id_; }
int cost_id() const { return cost_id_; }
const std::string& name() const;
void set_name(std::string name);
const std::string& type_string() const;
const NodeDef& def() const;
const OpDef& op_def() const;
NodeDef* mutable_def();
int32 num_inputs() const;
DataType input_type(int32_t i) const;
const DataTypeVector& input_types() const;
int32 num_outputs() const;
DataType output_type(int32_t o) const;
const DataTypeVector& output_types() const;
const std::string& requested_device() const;
void set_requested_device(const std::string& device);
const std::string& assigned_device_name() const;
void set_assigned_device_name(const std::string& device_name);
bool has_assigned_device_name() const {
return assigned_device_name_index_ > 0;
}
int assigned_device_name_index() const { return assigned_device_name_index_; }
void set_assigned_device_name_index(int index);
void set_original_node_names(const std::vector<string>& names);
void set_original_func_names(const std::vector<string>& names);
AttrSlice attrs() const;
const protobuf::RepeatedPtrField<string>& requested_inputs() const;
gtl::iterator_range<NeighborIter> in_nodes() const;
gtl::iterator_range<NeighborIter> out_nodes() const;
const EdgeSet& in_edges() const { return in_edges_; }
const EdgeSet& out_edges() const { return out_edges_; }
bool IsSource() const { return id() == 0; }
bool IsSink() const { return id() == 1; }
bool IsOp() const { return id() > 1; }
bool IsSwitch() const { return class_ == NC_SWITCH; }
bool IsMerge() const { return class_ == NC_MERGE; }
bool IsEnter() const { return class_ == NC_ENTER; }
bool IsExit() const { return class_ == NC_EXIT; }
bool IsNextIteration() const { return class_ == NC_NEXT_ITERATION; }
bool IsLoopCond() const { return class_ == NC_LOOP_COND; }
bool IsControlTrigger() const { return class_ == NC_CONTROL_TRIGGER; }
bool IsSend() const { return class_ == NC_SEND || class_ == NC_HOST_SEND; }
bool IsRecv() const { return class_ == NC_RECV || class_ == NC_HOST_RECV; }
bool IsConstant() const { return class_ == NC_CONSTANT; }
bool IsVariable() const { return class_ == NC_VARIABLE; }
bool IsIdentity() const { return class_ == NC_IDENTITY; }
bool IsGetSessionHandle() const { return class_ == NC_GET_SESSION_HANDLE; }
bool IsGetSessionTensor() const { return class_ == NC_GET_SESSION_TENSOR; }
bool IsDeleteSessionTensor() const {
return class_ == NC_DELETE_SESSION_TENSOR;
}
bool IsControlFlow() const {
return (class_ != NC_OTHER) &&
(IsSwitch() || IsMerge() || IsEnter() || IsExit() ||
IsNextIteration());
}
bool IsHostSend() const { return class_ == NC_HOST_SEND; }
bool IsHostRecv() const { return class_ == NC_HOST_RECV; }
bool IsScopedAllocator() const { return class_ == NC_SCOPED_ALLOCATOR; }
bool IsCollective() const { return class_ == NC_COLLECTIVE; }
bool IsMetadata() const { return class_ == NC_METADATA; }
bool IsFakeParam() const { return class_ == NC_FAKE_PARAM; }
bool IsPartitionedCall() const { return class_ == NC_PARTITIONED_CALL; }
bool IsFunctionCall() const {
return class_ == NC_PARTITIONED_CALL || class_ == NC_FUNCTION_OP ||
class_ == NC_SYMBOLIC_GRADIENT;
}
bool IsIfNode() const { return class_ == NC_IF; }
bool IsWhileNode() const { return class_ == NC_WHILE; }
bool IsCaseNode() const { return class_ == NC_CASE; }
bool IsArg() const { return class_ == NC_ARG; }
bool IsRetval() const { return class_ == NC_RETVAL; }
bool IsDistributedCommunication() const {
return op_def().is_distributed_communication();
}
template <typename T>
void AddAttr(const std::string& name, const T& val) {
SetAttrValue(val, AddAttrHelper(name));
UpdateProperties();
}
void AddAttr(const std::string& name, std::vector<string>&& val) {
MoveAttrValue(std::move(val), AddAttrHelper(name));
UpdateProperties();
}
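  // A small illustration (hypothetical Node* `n`): attributes can be attached
  // or replaced after construction, e.g.
  //   n->AddAttr("T", DT_FLOAT);
  //   n->AddAttr("_class", std::vector<string>{"loc:@weights"});
  // Both overloads funnel through AddAttrHelper and then refresh the cached
  // NodeProperties via UpdateProperties().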
void ClearAttr(const std::string& name);
Status input_edge(int idx, const Edge** e) const;
Status input_edges(std::vector<const Edge*>* edges) const;
Status input_node(int idx, const Node** n) const;
Status input_node(int idx, Node** n) const;
Status input_tensor(int idx, OutputTensor* t) const;
WhileContext* while_ctx() const { return while_ctx_; }
void set_while_ctx(WhileContext* while_ctx) {
DCHECK(IsExit());
DCHECK(while_ctx_ == nullptr);
while_ctx_ = while_ctx;
}
std::shared_ptr<NodeProperties> properties() const { return props_; }
void SetStackTrace(const std::shared_ptr<AbstractStackTrace>& stack_trace) {
stack_trace_ = stack_trace;
}
const std::shared_ptr<AbstractStackTrace>& GetStackTrace() const {
return stack_trace_;
}
void UpdateProperties();
void ClearTypeInfo();
Status ShrinkTypeInfo(const absl::flat_hash_map<int, int>& index_mapping,
const string& type_attr_name, bool update_full_type);
void RunForwardTypeInference();
private:
friend class Graph;
Node();
std::shared_ptr<AbstractStackTrace> stack_trace_;
void Clear();
void MaybeCopyOnWrite();
AttrValue* AddAttrHelper(const std::string& name);
enum NodeClass {
NC_UNINITIALIZED,
NC_SWITCH,
NC_MERGE,
NC_ENTER,
NC_EXIT,
NC_NEXT_ITERATION,
NC_LOOP_COND,
NC_CONTROL_TRIGGER,
NC_SEND,
NC_HOST_SEND,
NC_RECV,
NC_HOST_RECV,
NC_CONSTANT,
NC_VARIABLE,
NC_IDENTITY,
NC_GET_SESSION_HANDLE,
NC_GET_SESSION_TENSOR,
NC_DELETE_SESSION_TENSOR,
NC_METADATA,
NC_SCOPED_ALLOCATOR,
NC_COLLECTIVE,
NC_FAKE_PARAM,
NC_PARTITIONED_CALL,
NC_FUNCTION_OP,
NC_SYMBOLIC_GRADIENT,
NC_IF,
NC_WHILE,
NC_CASE,
NC_ARG,
NC_RETVAL,
NC_OTHER
};
void Initialize(int id, int cost_id, std::shared_ptr<NodeProperties> props,
NodeClass node_class);
static NodeClass GetNodeClassForOp(const std::string& ts);
int id_;
int cost_id_;
NodeClass class_;
EdgeSet in_edges_;
EdgeSet out_edges_;
std::shared_ptr<NodeProperties> props_;
int assigned_device_name_index_;
Graph* graph_;
WhileContext* while_ctx_;
Node(const Node&) = delete;
void operator=(const Node&) = delete;
};
struct NodeDebugInfo {
const std::string name;
std::vector<string> original_node_names;
std::vector<string> original_func_names;
NodeDebugInfo(const Node& n);
NodeDebugInfo(const NodeDef& ndef);
NodeDebugInfo(StringPiece node_name, bool has_experimental_debug_info,
const NodeDef_ExperimentalDebugInfo& experimental_debug_info);
};
struct InputTensor {
Node* node;
int index;
InputTensor(Node* n, int i) : node(n), index(i) {}
InputTensor() : node(nullptr), index(0) {}
bool operator==(const InputTensor& other) const;
struct Hash {
uint64 operator()(InputTensor const& s) const;
};
};
struct OutputTensor {
Node* node;
int index;
OutputTensor(Node* n, int i) : node(n), index(i) {}
OutputTensor() : node(nullptr), index(0) {}
bool operator==(const OutputTensor& other) const;
struct Hash {
uint64 operator()(OutputTensor const& s) const;
};
};
class Edge {
public:
Node* src() const { return src_; }
Node* dst() const { return dst_; }
int id() const { return id_; }
int src_output() const { return src_output_; }
int dst_input() const { return dst_input_; }
bool IsControlEdge() const;
std::string DebugString() const;
private:
Edge() {}
friend class EdgeSetTest;
friend class GraphTest;
friend class Graph;
Node* src_;
Node* dst_;
int id_;
int src_output_;
int dst_input_;
};
class GraphEdgesIterable {
private:
const std::vector<Edge*>& edges_;
public:
explicit GraphEdgesIterable(const std::vector<Edge*>& edges)
: edges_(edges) {}
typedef Edge* value_type;
class const_iterator {
private:
std::vector<value_type>::const_iterator iter_;
std::vector<value_type>::const_iterator end_;
void apply_filter() {
while (iter_ != end_ && *iter_ == nullptr) {
++iter_;
}
}
public:
const_iterator(std::vector<value_type>::const_iterator iter,
std::vector<value_type>::const_iterator end)
: iter_(iter), end_(end) {
apply_filter();
}
bool operator==(const const_iterator& other) const {
return iter_ == other.iter_;
}
bool operator!=(const const_iterator& other) const {
return iter_ != other.iter_;
}
const_iterator& operator++() {
++iter_;
apply_filter();
return *this;
}
value_type operator*() { return *iter_; }
};
const_iterator begin() {
return const_iterator(edges_.begin(), edges_.end());
}
const_iterator end() { return const_iterator(edges_.end(), edges_.end()); }
};
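// The filtering iterator above exists because removed edges leave null slots in
// the underlying edge vector (their ids may later be reused); iterating via
// graph.edges() therefore yields only live Edge pointers and never nullptr.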
class Graph {
public:
explicit Graph(const OpRegistryInterface* ops);
explicit Graph(const FunctionLibraryDefinition& flib_def);
~Graph();
std::unique_ptr<Graph> Clone();
static constexpr int kControlSlot = -1;
const VersionDef& versions() const;
void set_versions(const VersionDef& versions);
Node* AddNode(NodeDef node_def, Status* status);
absl::StatusOr<Node*> AddNode(NodeDef node_def);
Node* CopyNode(const Node* node);
void RemoveNode(Node* node);
void Copy(const Graph& src);
void Clear();
const Edge* AddEdge(Node* source, int x, Node* dest, int y);
const Edge* AddControlEdge(Node* source, Node* dest,
bool allow_duplicates = false);
void RemoveEdge(const Edge* edge);
void RemoveControlEdge(const Edge* e);
Status UpdateEdge(Node* new_src, int new_src_index, Node* dst, int dst_index);
static void AddInput(NodeDef* dst, StringPiece src_name, int src_slot);
Status AddWhileInputHack(Node* new_src, int new_src_index, Node* dst);
Status AddFunctionLibrary(const FunctionDefLibrary& fdef_lib);
Status AddFunctionLibrary(FunctionDefLibrary&& fdef_lib);
Status AddFunctionLibrary(const FunctionDefLibrary& fdef_lib,
const FunctionDefLibraryStackTraces& stack_traces);
Status AddFunctionLibrary(FunctionDefLibrary&& fdef_lib,
const FunctionDefLibraryStackTraces& stack_traces);
Status AddFunctionDef(const FunctionDef& fdef,
const StackTracesMap& stack_traces);
Status AddGradientDef(const GradientDef& gdef);
int num_nodes() const { return num_nodes_; }
int num_op_nodes() const {
DCHECK_GE(num_nodes_, 2);
return num_nodes_ - 2;
}
int num_edges() const { return num_edges_; }
void ToGraphDefSubRange(GraphDef* graph_def, int from_node_id,
bool include_flib_def = true,
bool include_debug_info = false) const;
void ToGraphDef(GraphDef* graph_def, bool include_flib_def = true,
bool include_debug_info = false) const;
GraphDef ToGraphDefDebug() const;
std::string NewName(StringPiece prefix);
gtl::iterator_range<NodeIter> nodes() const;
gtl::iterator_range<NodeIter> op_nodes() const;
int num_node_ids() const { return nodes_.size(); }
Node* FindNodeId(int id) const { return nodes_[id]; }
int num_edge_ids() const { return edges_.size(); }
const Edge* FindEdgeId(int id) const { return edges_[id]; }
GraphEdgesIterable edges() const { return GraphEdgesIterable(edges_); }
enum { kSourceId = 0, kSinkId = 1 };
Node* source_node() const { return FindNodeId(kSourceId); }
Node* sink_node() const { return FindNodeId(kSinkId); }
const OpRegistryInterface* op_registry() const { return &ops_; }
const FunctionLibraryDefinition& flib_def() const { return ops_; }
FunctionLibraryDefinition* mutable_flib_def() { return &ops_; }
void CheckDeviceNameIndex(int index) {
DCHECK_GE(index, 0);
DCHECK_LT(index, static_cast<int>(device_names_.size()));
}
int InternDeviceName(const std::string& device_name);
const std::string& get_assigned_device_name(const Node& node) const {
return device_names_[node.assigned_device_name_index()];
}
void set_assigned_device_name_index(Node* node, int device_name_index) {
CheckDeviceNameIndex(device_name_index);
node->assigned_device_name_index_ = device_name_index;
}
void set_assigned_device_name(Node* node, const std::string& device_name) {
node->assigned_device_name_index_ = InternDeviceName(device_name);
}
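  // Device names are interned: each distinct assigned device string is stored
  // once in device_names_ and nodes hold only an index into that table, which
  // keeps per-node storage small when many nodes share a device.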
Status IsValidNode(const Node* node) const;
Status IsValidOutputTensor(const Node* node, int idx) const;
Status IsValidInputTensor(const Node* node, int idx) const;
Status AddWhileContext(StringPiece frame_name, std::vector<Node*> enter_nodes,
std::vector<Node*> exit_nodes,
OutputTensor cond_output,
std::vector<OutputTensor> body_inputs,
std::vector<OutputTensor> body_outputs,
WhileContext** result);
std::unordered_map<string, Node*> BuildNodeNameIndex() const;
absl::optional<std::vector<bool>>& GetConstArgIndicesCache() const {
return const_arg_indices_cache_;
}
void SetConstructionContext(ConstructionContext construction_context) {
construction_context_ = construction_context;
}
ConstructionContext GetConstructionContextInternal() const {
return construction_context_;
}
void SetNodeType(StringPiece name, const FullTypeDef& type); | #include "tensorflow/core/graph/graph.h"
#include <memory>
#include <set>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/benchmark_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
using ::testing::UnorderedElementsAre;
REGISTER_OP("OneInput").Input("x: float");
REGISTER_OP("OneOutput").Output("y: float");
REGISTER_OP("OneInputTwoOutputs")
.Input("x: float")
.Output("y: float")
.Output("z: float");
REGISTER_OP("TwoInputsOneOutput")
.Input("x: float")
.Input("y: float")
.Output("z: float");
class GraphTest : public ::testing::Test {
protected:
GraphTest() : graph_(OpRegistry::Global()) {}
~GraphTest() override {}
static void VerifyNodes(Node* node, const std::vector<Node*>& expected_in,
const std::vector<Node*>& expected_out) {
std::vector<Node*> in;
for (const Edge* e : node->in_edges()) {
in.push_back(e->src());
}
EXPECT_EQ(Stringify(expected_in), Stringify(in));
std::vector<Node*> out;
for (const Edge* e : node->out_edges()) {
out.push_back(e->dst());
}
EXPECT_EQ(Stringify(expected_out), Stringify(out));
}
std::unique_ptr<Edge> BuildEdge(int id = 0, Node* src = nullptr,
Node* dst = nullptr, int x = 0, int y = 0) {
Edge* e = new Edge;
e->id_ = id;
e->src_ = src;
e->dst_ = dst;
e->src_output_ = x;
e->dst_input_ = y;
return absl::WrapUnique(e);
}
void VerifyGraphStats() {
int nodes = 0;
for (const Node* n : graph_.nodes()) {
VLOG(1) << n->id();
++nodes;
}
EXPECT_EQ(nodes, graph_.num_nodes());
int edges = 0;
for (const Edge* e : graph_.edges()) {
VLOG(1) << e->id();
++edges;
}
EXPECT_EQ(edges, graph_.num_edges());
}
Node* AddNodeWithName(const string& name) {
Node* node;
TF_CHECK_OK(NodeBuilder(name, "NoOp").Finalize(&graph_, &node));
return node;
}
Node* FromNodeDef(const string& name, const string& node_type,
int num_inputs) {
auto builder = NodeDefBuilder(name, node_type);
for (int i = 0; i < num_inputs; ++i) {
builder = builder.Input(strings::StrCat("node_", i), i, DT_FLOAT);
}
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph_.AddNode(node_def, &s);
TF_CHECK_OK(s);
return node;
}
void FromGraphDef(const string& gdef_ascii) {
GraphDef gdef;
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &gdef));
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, gdef, &graph_));
}
Node* FindNode(const string& name) {
for (Node* node : graph_.nodes()) {
if (node->name() == name) return node;
}
LOG(FATAL) << name;
}
bool ControlEdgeExistsInGraphOrNodeDef(const Node* src, const Node* dst) {
for (const Edge* e : dst->in_edges()) {
if (e->IsControlEdge() && e->src() == src &&
e->src_output() == Graph::kControlSlot &&
e->dst_input() == Graph::kControlSlot) {
return true;
}
}
std::string control_edge_name = strings::StrCat("^", src->name());
for (int i = 0; i < dst->def().input_size(); ++i) {
if (dst->def().input(i) == control_edge_name) {
return true;
}
}
return false;
}
Graph graph_;
private:
static std::vector<string> Stringify(const std::vector<Node*>& nodes) {
std::vector<string> result;
result.reserve(nodes.size());
for (Node* n : nodes) {
result.push_back(n->DebugString());
}
std::sort(result.begin(), result.end());
return result;
}
};
namespace {
TEST_F(GraphTest, Constructor) {
Node* source = graph_.source_node();
EXPECT_NE(source, nullptr);
Node* sink = graph_.sink_node();
EXPECT_NE(sink, nullptr);
VerifyNodes(source, {}, {sink});
VerifyNodes(sink, {source}, {});
EXPECT_EQ(2, graph_.num_node_ids());
VerifyGraphStats();
}
TEST_F(GraphTest, RemoveThenAdd) {
AddNodeWithName("A");
Node* b = AddNodeWithName("B");
const int b_id = b->id();
AddNodeWithName("C");
EXPECT_EQ(5, graph_.num_node_ids());
graph_.RemoveNode(b);
EXPECT_EQ(5, graph_.num_node_ids());
Node* d = AddNodeWithName("D");
EXPECT_NE(b_id, d->id());
EXPECT_EQ(6, graph_.num_node_ids());
VerifyGraphStats();
}
TEST_F(GraphTest, InNodesAndOutNodes) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = AddNodeWithName("B");
Node* c = FromNodeDef("C", "OneInput", 1);
graph_.RemoveNode(b);
Node* d = AddNodeWithName("D");
const Edge* source_to_a = graph_.AddControlEdge(graph_.source_node(), a);
graph_.AddControlEdge(a, graph_.sink_node());
graph_.AddEdge(a, 0, c, 0);
graph_.AddControlEdge(c, graph_.sink_node());
EXPECT_EQ("A", a->name());
VerifyNodes(a, {graph_.source_node()}, {c, graph_.sink_node()});
EXPECT_EQ("C", c->name());
VerifyNodes(c, {a}, {graph_.sink_node()});
EXPECT_EQ("D", d->name());
VerifyNodes(d, {}, {});
VerifyNodes(graph_.source_node(), {}, {a, graph_.sink_node()});
VerifyNodes(graph_.sink_node(), {a, c, graph_.source_node()}, {});
graph_.RemoveEdge(source_to_a);
VerifyNodes(a, {}, {c, graph_.sink_node()});
VerifyNodes(graph_.source_node(), {}, {graph_.sink_node()});
graph_.RemoveNode(c);
VerifyNodes(a, {}, {graph_.sink_node()});
VerifyNodes(graph_.sink_node(), {a, graph_.source_node()}, {});
EXPECT_EQ(6, graph_.num_node_ids());
EXPECT_EQ(5, graph_.num_edge_ids());
VerifyGraphStats();
}
TEST_F(GraphTest, NodeByIndex) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* c = FromNodeDef("C", "OneInput", 1);
graph_.AddEdge(a, 0, c, 0);
const Node* a_copy;
TF_ASSERT_OK(c->input_node(0, &a_copy));
EXPECT_EQ(a, a_copy);
const Edge* e;
TF_ASSERT_OK(c->input_edge(0, &e));
EXPECT_EQ(0, e->dst_input());
EXPECT_EQ(a, e->src());
EXPECT_EQ(c, e->dst());
EXPECT_EQ(0, e->src_output());
Node* t = FromNodeDef("T", "TwoInputsOneOutput", 2);
graph_.AddEdge(a, 0, t, 0);
graph_.AddEdge(t, 0, t, 1);
const Node* t_0;
const Node* t_1;
TF_ASSERT_OK(t->input_node(0, &t_0));
EXPECT_EQ(a, t_0);
TF_ASSERT_OK(t->input_node(1, &t_1));
EXPECT_EQ(t, t_1);
TF_ASSERT_OK(t->input_edge(1, &e));
EXPECT_EQ(1, e->dst_input());
EXPECT_EQ(t, e->src());
std::vector<const Edge*> t_input_edges;
TF_ASSERT_OK(t->input_edges(&t_input_edges));
ASSERT_EQ(2, t_input_edges.size());
EXPECT_EQ(a, t_input_edges[0]->src());
EXPECT_EQ(e, t_input_edges[1]);
EXPECT_FALSE(c->input_node(1, &a_copy).ok());
EXPECT_FALSE(c->input_node(-1, &a_copy).ok());
graph_.RemoveNode(a);
Status s = c->input_node(0, &a_copy);
EXPECT_FALSE(s.ok());
Node* a_new = FromNodeDef("A_new", "OneOutput", 0);
Node* b_new = FromNodeDef("B_new", "OneOutput", 0);
graph_.AddEdge(a_new, 0, c, 0);
const Edge* a_new_c_edge;
TF_ASSERT_OK(c->input_edge(0, &a_new_c_edge));
graph_.AddEdge(b_new, 0, c, 0);
const Edge* b_new_c_edge;
TF_ASSERT_OK(c->input_edge(0, &b_new_c_edge));
graph_.RemoveEdge(a_new_c_edge);
TF_ASSERT_OK(c->input_edge(0, &b_new_c_edge));
std::vector<const Edge*> c_input_edges;
TF_ASSERT_OK(c->input_edges(&c_input_edges));
ASSERT_EQ(1, c_input_edges.size());
EXPECT_EQ(b_new_c_edge, c_input_edges[0]);
}
TEST_F(GraphTest, NodeIteration) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = AddNodeWithName("B");
Node* c = FromNodeDef("C", "OneInput", 1);
graph_.RemoveNode(b);
Node* d = AddNodeWithName("D");
const Edge* source_to_a = graph_.AddControlEdge(graph_.source_node(), a);
graph_.AddControlEdge(a, graph_.sink_node());
graph_.AddEdge(a, 0, c, 0);
graph_.AddControlEdge(c, graph_.sink_node());
graph_.RemoveEdge(source_to_a);
graph_.RemoveNode(c);
std::set<string> expected;
expected.insert(graph_.source_node()->DebugString());
expected.insert(a->DebugString());
expected.insert(d->DebugString());
expected.insert(graph_.sink_node()->DebugString());
std::set<string> actual;
for (int id = 0; id < graph_.num_node_ids(); ++id) {
Node* node = graph_.FindNodeId(id);
if (node != nullptr) {
actual.insert(node->DebugString());
}
}
EXPECT_EQ(expected, actual);
actual.clear();
for (Node* node : graph_.nodes()) {
actual.insert(node->DebugString());
}
EXPECT_EQ(expected, actual);
VerifyGraphStats();
}
static void CheckType(Node* node, bool b) {
EXPECT_TRUE(b) << node->DebugString();
int count = 0;
if (node->IsSource()) count++;
if (node->IsSink()) count++;
if (node->IsOp()) count++;
EXPECT_EQ(1, count) << node->DebugString();
}
TEST_F(GraphTest, Type) {
Node* op = AddNodeWithName("A");
CheckType(graph_.source_node(), graph_.source_node()->IsSource());
CheckType(graph_.sink_node(), graph_.sink_node()->IsSink());
CheckType(op, op->IsOp());
VerifyGraphStats();
}
TEST_F(GraphTest, AddAttr) {
Node* n1 = AddNodeWithName("A");
n1->AddAttr("_a", "new_attr");
string attr;
EXPECT_EQ(absl::OkStatus(), GetNodeAttr(n1->attrs(), "_a", &attr));
EXPECT_EQ("new_attr", attr);
Node* n2 = graph_.CopyNode(n1);
n1->AddAttr("_b", "new_attr_2");
EXPECT_EQ(absl::OkStatus(), GetNodeAttr(n1->attrs(), "_a", &attr));
EXPECT_EQ("new_attr", attr);
EXPECT_EQ(absl::OkStatus(), GetNodeAttr(n1->attrs(), "_b", &attr));
EXPECT_EQ("new_attr_2", attr);
EXPECT_EQ(absl::OkStatus(), GetNodeAttr(n2->attrs(), "_a", &attr));
EXPECT_EQ("new_attr", attr);
EXPECT_NE(absl::OkStatus(), GetNodeAttr(n2->attrs(), "_b", &attr));
}
static string EdgeIter(const Graph& g) {
std::vector<std::pair<int, int> > edges;
for (const Edge* e : g.edges()) {
edges.push_back(std::make_pair(e->src()->id(), e->dst()->id()));
}
std::sort(edges.begin(), edges.end());
string result;
for (auto& p : edges) {
strings::StrAppend(&result, p.first, "->", p.second, ";");
}
return result;
}
TEST_F(GraphTest, EdgeIteration) {
EXPECT_EQ("0->1;", EdgeIter(graph_));
Node* a = FromNodeDef("A", "OneInputTwoOutputs", 1);
Node* b = FromNodeDef("B", "OneInput", 1);
EXPECT_EQ("0->1;", EdgeIter(graph_));
graph_.AddEdge(a, 0, b, 0);
EXPECT_EQ("0->1;2->3;", EdgeIter(graph_));
graph_.AddControlEdge(graph_.source_node(), a);
graph_.AddControlEdge(b, graph_.sink_node());
EXPECT_EQ("0->1;0->2;2->3;3->1;", EdgeIter(graph_));
graph_.AddEdge(a, 1, a, 0);
EXPECT_EQ("0->1;0->2;2->2;2->3;3->1;", EdgeIter(graph_));
VerifyGraphStats();
}
TEST_F(GraphTest, NewName) {
string a1 = graph_.NewName("A");
string a2 = graph_.NewName("A");
string b1 = graph_.NewName("B");
EXPECT_NE(a1, a2);
EXPECT_NE(a1, b1);
EXPECT_NE(a2, b1);
EXPECT_TRUE(absl::StartsWith(a1, "A")) << a1;
}
TEST_F(GraphTest, IsValidNode) {
Node* g1_node1;
TF_CHECK_OK(NodeBuilder("g1_node1", "NoOp").Finalize(&graph_, &g1_node1));
Graph graph2(OpRegistry::Global());
Node* g2_node1;
Node* g2_node2;
TF_CHECK_OK(NodeBuilder("g2_node1", "NoOp").Finalize(&graph2, &g2_node1));
TF_CHECK_OK(NodeBuilder("g2_node2", "NoOp").Finalize(&graph2, &g2_node2));
Status s = graph_.IsValidNode(nullptr);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_EQ(string("Node is null"), s.message());
s = graph_.IsValidNode(g2_node2);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_EQ(string("node id 3 is >= than number of nodes in graph 3"),
s.message());
s = graph_.IsValidNode(g2_node1);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_EQ(string("Node with id 2 is different from the passed in node. "
"Does it belong to a different graph?"),
s.message());
}
TEST_F(GraphTest, AddControlEdge) {
FromGraphDef(
"node { name: 'A' op: 'OneOutput' }"
"node { name: 'B' op: 'OneInputTwoOutputs' input: [ 'A:0' ] }"
"node { name: 'C' op: 'NoOp' } ");
Node* a = FindNode("A");
Node* b = FindNode("B");
Node* c = FindNode("C");
const Edge* edge = graph_.AddControlEdge(c, a);
ASSERT_TRUE(edge != nullptr);
EXPECT_EQ(edge->src(), c);
EXPECT_EQ(edge->src_output(), Graph::kControlSlot);
EXPECT_EQ(edge->dst(), a);
EXPECT_EQ(edge->dst_input(), Graph::kControlSlot);
ASSERT_EQ(a->def().input_size(), 1);
EXPECT_EQ(a->def().input(0), "^C");
edge = graph_.AddControlEdge(a, b);
EXPECT_TRUE(edge != nullptr);
ASSERT_EQ(b->def().input_size(), 2);
EXPECT_EQ(b->def().input(0), "A:0");
EXPECT_EQ(b->def().input(1), "^A");
edge = graph_.AddControlEdge(a, b);
EXPECT_TRUE(edge == nullptr);
EXPECT_EQ(b->def().input_size(), 2);
edge = graph_.AddControlEdge(a, b, true);
EXPECT_TRUE(edge != nullptr);
ASSERT_EQ(b->def().input_size(), 2);
EXPECT_EQ(b->def().input(0), "A:0");
EXPECT_EQ(b->def().input(1), "^A");
edge = graph_.AddControlEdge(graph_.source_node(), b);
EXPECT_TRUE(edge != nullptr);
EXPECT_EQ(b->def().input_size(), 2);
edge = graph_.AddControlEdge(graph_.source_node(), b);
EXPECT_TRUE(edge == nullptr);
EXPECT_EQ(b->def().input_size(), 2);
}
TEST_F(GraphTest, RemoveControlEdge) {
FromGraphDef(
"node { name: 'A' op: 'OneOutput' }"
"node { name: 'B' op: 'OneInputTwoOutputs' input: [ 'A:0' ] }"
"node { name: 'C' op: 'NoOp' } ");
Node* a = FindNode("A");
Node* b = FindNode("B");
Node* c = FindNode("C");
const Edge* edge_1 = graph_.AddControlEdge(c, a);
const Edge* edge_2 = graph_.AddControlEdge(a, b);
ASSERT_TRUE(edge_1 != nullptr);
ASSERT_TRUE(edge_2 != nullptr);
ASSERT_TRUE(ControlEdgeExistsInGraphOrNodeDef(c, a));
ASSERT_TRUE(ControlEdgeExistsInGraphOrNodeDef(a, b));
graph_.RemoveControlEdge(edge_1);
ASSERT_TRUE(!ControlEdgeExistsInGraphOrNodeDef(c, a));
ASSERT_TRUE(ControlEdgeExistsInGraphOrNodeDef(a, b));
graph_.RemoveControlEdge(edge_2);
ASSERT_TRUE(!ControlEdgeExistsInGraphOrNodeDef(c, a));
ASSERT_TRUE(!ControlEdgeExistsInGraphOrNodeDef(a, b));
const Edge* edge_3 = graph_.AddControlEdge(c, a);
const Edge* edge_4 = graph_.AddControlEdge(c, a);
ASSERT_TRUE(edge_3 != nullptr);
ASSERT_TRUE(edge_4 == nullptr);
graph_.RemoveControlEdge(edge_3);
ASSERT_TRUE(!ControlEdgeExistsInGraphOrNodeDef(c, a));
}
TEST_F(GraphTest, UpdateEdge) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = FromNodeDef("B", "OneInputTwoOutputs", 1);
Node* c = FromNodeDef("C", "OneInputTwoOutputs", 1);
Node* d = FromNodeDef("D", "OneInput", 1);
graph_.AddControlEdge(graph_.source_node(), a);
graph_.AddControlEdge(a, graph_.sink_node());
graph_.AddEdge(a, 0, c, 0);
graph_.AddControlEdge(c, graph_.sink_node());
graph_.AddEdge(c, 0, b, 0);
graph_.AddEdge(c, 1, d, 0);
EXPECT_EQ("0->1;0->2;2->1;2->4;4->1;4->3;4->5;", EdgeIter(graph_));
TF_EXPECT_OK(graph_.UpdateEdge(a, 0, b, 0));
EXPECT_EQ("0->1;0->2;2->1;2->3;2->4;4->1;4->5;", EdgeIter(graph_));
TF_EXPECT_OK(graph_.UpdateEdge(a, 0, d, 0));
EXPECT_EQ("0->1;0->2;2->1;2->3;2->4;2->5;4->1;", EdgeIter(graph_));
Status s = graph_.UpdateEdge(a, 1, d, 0);
EXPECT_FALSE(s.ok());
EXPECT_EQ(
s.message(),
"Node 'A' (type: 'OneOutput', num of outputs: 1) does not have output 1");
s = graph_.UpdateEdge(c, 0, a, 0);
EXPECT_FALSE(s.ok());
EXPECT_EQ(
s.message(),
"Node 'A' (type: 'OneOutput', num of inputs: 0) does not have input 0");
}
TEST_F(GraphTest, InputEdges) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = FromNodeDef("B", "TwoInputsOneOutput", 2);
graph_.AddEdge(a, 0, b, 0);
std::vector<const Edge*> edges;
EXPECT_EQ(error::INVALID_ARGUMENT, b->input_edges(&edges).code());
graph_.AddEdge(a, 0, b, 1);
TF_EXPECT_OK(b->input_edges(&edges));
}
TEST_F(GraphTest, EdgeDebugString) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = FromNodeDef("B", "OneInput", 1);
auto e = graph_.AddEdge(a, 0, b, 0);
auto s = e->DebugString();
EXPECT_EQ(s, "[id=1 A:0 -> B:0]");
auto e1 = BuildEdge();
auto s1 = e1->DebugString();
EXPECT_EQ(s1, "[id=0 <NULL>:0 -> <NULL>:0]");
  auto e2 = BuildEdge(2, nullptr, b, 1, 1);
auto s2 = e2->DebugString();
EXPECT_EQ(s2, "[id=2 <NULL>:1 -> B:1]");
  auto e3 = BuildEdge(3, a, nullptr, 2, 1);
auto s3 = e3->DebugString();
EXPECT_EQ(s3, "[id=3 A:2 -> <NULL>:1]");
}
TEST_F(GraphTest, AddFunctionLibrary) {
FunctionDefLibrary proto;
*proto.add_function() = test::function::XTimesTwo();
*proto.add_function() = test::function::XTimesFour();
TF_EXPECT_OK(graph_.AddFunctionLibrary(proto));
EXPECT_TRUE(graph_.flib_def().Find("XTimesTwo") != nullptr);
EXPECT_TRUE(graph_.flib_def().Find("XTimesFour") != nullptr);
TF_EXPECT_OK(graph_.AddFunctionLibrary(proto));
EXPECT_TRUE(graph_.flib_def().Find("XTimesTwo") != nullptr);
EXPECT_TRUE(graph_.flib_def().Find("XTimesFour") != nullptr);
FunctionDefLibrary error_proto = proto;
*error_proto.mutable_function(0)->add_node_def() =
error_proto.function(0).node_def(0);
Status s = graph_.AddFunctionLibrary(error_proto);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Cannot add function 'XTimesTwo' because a different function with "
"the same name already exists.");
error_proto = proto;
error_proto.mutable_function(0)->mutable_signature()->set_name("Add");
s = graph_.AddFunctionLibrary(error_proto);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Cannot add function 'Add' because an op with the same name "
"already exists.");
GradientDef* grad = proto.add_gradient();
grad->set_function_name("XTimesTwo");
grad->set_gradient_func("Undefined");
TF_EXPECT_OK(graph_.AddFunctionLibrary(proto));
EXPECT_EQ(graph_.flib_def().FindGradient("XTimesTwo"), "Undefined");
TF_EXPECT_OK(graph_.AddFunctionLibrary(proto));
EXPECT_EQ(graph_.flib_def().FindGradient("XTimesTwo"), "Undefined");
error_proto = proto;
error_proto.mutable_gradient(0)->set_gradient_func("Undefined2");
s = graph_.AddFunctionLibrary(error_proto);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Cannot assign gradient function 'Undefined2' to 'XTimesTwo' "
"because it already has gradient function 'Undefined'");
}
TEST_F(GraphTest, BuildNodeNameIndex) {
FromGraphDef(
"node { name: 'A' op: 'OneOutput' }"
"node { name: 'B' op: 'OneInputTwoOutputs' input: [ 'A:0' ] }"
"node { name: 'C' op: 'NoOp' } ");
auto node_name_index = graph_.BuildNodeNameIndex();
EXPECT_EQ(node_name_index.size(), 5);
std::vector<string> node_names{"_SOURCE", "_SINK", "A", "B", "C"};
for (const string& node_name : node_names) {
EXPECT_NE(node_name_index.find(node_name), node_name_index.end());
EXPECT_EQ(node_name_index[node_name], FindNode(node_name));
}
}
TEST_F(GraphTest, Clear) {
const int num_nodes = 10;
const int num_edges_per_node = 2;
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
graph.Clear();
EXPECT_EQ(graph.num_nodes(), 2);
}
TEST_F(GraphTest, NodeFullType) {
FromNodeDef("A", "OneOutput", 0);
FullTypeDef node_t;
node_t.set_type_id(TFT_PRODUCT);
FullTypeDef* output_t = node_t.add_args();
output_t->set_type_id(TFT_TENSOR);
output_t->add_args()->set_type_id(TFT_FLOAT);
graph_.SetNodeType("A", node_t);
const FullTypeDef* ft;
graph_.NodeType("A", &ft);
EXPECT_NE(ft, nullptr);
EXPECT_EQ(ft->type_id(), TFT_PRODUCT);
}
TEST_F(GraphTest, NodeShrinkTypeOutput) {
auto builder = NodeDefBuilder("while", "While");
builder = builder.Input({NodeDefBuilder::NodeOut("node_0", 0, DT_FLOAT),
NodeDefBuilder::NodeOut("node_1", 0, DT_INT32),
NodeDefBuilder::NodeOut("node_2", 0, DT_INT64),
NodeDefBuilder::NodeOut("node_3", 0, DT_STRING)});
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph_.AddNode(node_def, &s);
TF_CHECK_OK(s);
FullTypeDef node_t;
node_t.set_type_id(TFT_PRODUCT);
for (FullTypeId id : {TFT_FLOAT, TFT_INT32, TFT_INT64, TFT_STRING}) {
FullTypeDef* output_t = node_t.add_args();
output_t->set_type_id(TFT_TENSOR);
output_t->add_args()->set_type_id(id);
}
graph_.SetNodeType("while", node_t);
TF_CHECK_OK(
node->ShrinkTypeInfo({{1, 0}, {3, 1}}, "T", true));
std::vector<DataType> new_dtypes;
TF_CHECK_OK(GetNodeAttr(node->def(), "T", &new_dtypes));
ASSERT_EQ(new_dtypes.size(), 2);
EXPECT_EQ(new_dtypes[0], DT_INT32);
EXPECT_EQ(new_dtypes[1], DT_STRING);
const FullTypeDef* ft;
graph_.NodeType("while", &ft);
EXPECT_NE(ft, nullptr);
EXPECT_EQ(ft->type_id(), TFT_PRODUCT);
ASSERT_EQ(ft->args_size(), 2);
ASSERT_EQ(ft->args(0).args_size(), 1);
EXPECT_EQ(ft->args(0).args(0).type_id(), TFT_INT32);
ASSERT_EQ(ft->args(1).args_size(), 1);
EXPECT_EQ(ft->args(1).args(0).type_id(), TFT_STRING);
}
TEST_F(GraphTest, NodeShrinkTypeInput) {
auto builder = NodeDefBuilder("if", "If");
builder = builder.Input("cond", 0, DT_BOOL);
builder = builder.Input({NodeDefBuilder::NodeOut("node_0", 0, DT_FLOAT),
NodeDefBuilder::NodeOut("node_1", 0, DT_INT32),
NodeDefBuilder::NodeOut("node_2", 0, DT_INT64),
NodeDefBuilder::NodeOut("node_3", 0, DT_STRING)});
  builder = builder.Attr("Tout", "[DT_FLOAT, DT_INT32, DT_INT64, DT_STRING]");
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph_.AddNode(node_def, &s);
TF_CHECK_OK(s);
FullTypeDef node_t;
node_t.set_type_id(TFT_PRODUCT);
for (FullTypeId id : {TFT_FLOAT, TFT_INT32, TFT_INT64, TFT_STRING}) {
FullTypeDef* output_t = node_t.add_args();
output_t->set_type_id(TFT_TENSOR);
output_t->add_args()->set_type_id(id);
}
graph_.SetNodeType("if", node_t);
TF_CHECK_OK(node->ShrinkTypeInfo({{1, 0}, {3, 1}}, "Tin",
false));
std::vector<DataType> new_dtypes;
TF_CHECK_OK(GetNodeAttr(node->def(), "Tin", &new_dtypes));
ASSERT_EQ(new_dtypes.size(), 2);
EXPECT_EQ(new_dtypes[0], DT_INT32);
EXPECT_EQ(new_dtypes[1], DT_STRING);
const FullTypeDef* ft;
graph_.NodeType("if", &ft);
EXPECT_NE(ft, nullptr);
EXPECT_EQ(ft->type_id(), TFT_PRODUCT);
ASSERT_EQ(ft->args_size(), 4);
ASSERT_EQ(ft->args(0).args_size(), 1);
EXPECT_EQ(ft->args(0).args(0).type_id(), TFT_FLOAT);
ASSERT_EQ(ft->args(1).args_size(), 1);
EXPECT_EQ(ft->args(1).args(0).type_id(), TFT_INT32);
ASSERT_EQ(ft->args(2).args_size(), 1);
EXPECT_EQ(ft->args(2).args(0).type_id(), TFT_INT64);
ASSERT_EQ(ft->args(3).args_size(), 1);
EXPECT_EQ(ft->args(3).args(0).type_id(), TFT_STRING);
}
TEST(AddInput, AddsControlSlot) {
auto input_name = "input-name";
auto expected_input_name = absl::StrCat("^", input_name);
NodeDef node_def;
tensorflow::Graph::AddInput(&node_def, input_name, Graph::kControlSlot);
EXPECT_EQ(node_def.input(0), expected_input_name);
}
TEST(AddInput, AddsSourceSlotZero) {
auto input_name = "input-name";
NodeDef node_def;
tensorflow::Graph::AddInput(&node_def, input_name, 0);
EXPECT_EQ(node_def.input(0), input_name);
}
TEST(AddInput, AddsOtherSlots) {
auto input_name = "input-name";
int arbitrary_slot = 37;
auto expected_input_name =
absl::StrCat(input_name, ":", arbitrary_slot);
NodeDef node_def;
tensorflow::Graph::AddInput(&node_def, input_name, arbitrary_slot);
EXPECT_EQ(node_def.input(0), expected_input_name);
}
void BM_InEdgeIteration(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
int64_t sum = 0;
for (auto s : state) {
for (const Node* node : graph.nodes()) {
for (auto e : node->in_edges()) {
sum += e->id();
}
}
}
VLOG(1) << sum;
}
BENCHMARK(BM_InEdgeIteration)->ArgPair(10, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 6, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 9, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 12, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 15, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(10, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 6, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 9, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 12, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 15, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(10, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 6, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 9, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 12, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 15, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(10, 16);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 6, 16);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 9, 16);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 12, 16);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 15, 16);
void BM_GraphCreation(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
int64_t sum = 0;
for (auto s : state) {
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
sum += graph.num_node_ids();
}
VLOG(1) << sum;
}
BENCHMARK(BM_GraphCreation)->ArgPair(10, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 6, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 9, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 12, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 15, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(10, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 6, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 9, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 12, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 15, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(10, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 6, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 9, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 12, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 15, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(10, 16);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 6, 16);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 9, 16);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 12, 16);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 15, 16);
void BM_ToGraphDef(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
int64_t sum = 0;
for (auto s : state) {
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
sum += graph_def.node_size();
}
VLOG(1) << sum;
}
BENCHMARK(BM_ToGraphDef)->ArgPair(10, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 6, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 9, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 12, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 15, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(10, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 6, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 9, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 12, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 15, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(10, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 6, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 9, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 12, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 15, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(10, 16);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 6, 16);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 9, 16);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 12, 16);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 15, 16);
1,431 | cpp | tensorflow/tensorflow | control_flow | tensorflow/core/graph/control_flow.cc | tensorflow/core/graph/control_flow_test.cc | #ifndef TENSORFLOW_CORE_FRAMEWORK_CONTROL_FLOW_H_
#define TENSORFLOW_CORE_FRAMEWORK_CONTROL_FLOW_H_
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
const uint64 kIllegalFrameId = ~0uLL;
const int64_t kIllegalIterId = -1;
struct FrameAndIter {
uint64 frame_id = kIllegalFrameId;
int64_t iter_id = kIllegalIterId;
FrameAndIter() {}
FrameAndIter(uint64 frame, int64_t iter) {
frame_id = frame;
iter_id = iter;
}
bool operator==(const FrameAndIter& other) const {
return (frame_id == other.frame_id && iter_id == other.iter_id);
}
};
struct FrameAndIterHash {
size_t operator()(const FrameAndIter& key) const {
CHECK_EQ(sizeof(uint64) + sizeof(int64_t), sizeof(FrameAndIter));
return Hash64(reinterpret_cast<const char*>(&key), sizeof(FrameAndIter));
}
};
}
#endif
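// Illustrative sketch (hypothetical helper, not TensorFlow API): FrameAndIterHash
// lets FrameAndIter serve as a hash-map key, e.g. to track per-(frame, iteration)
// state. The map contents and helper name below are made up for the example.
#include <unordered_map>
namespace tensorflow {
namespace control_flow_example {
// Returns the stored state for (frame_id, iter_id), or -1 if absent.
inline int LookupState(
    const std::unordered_map<FrameAndIter, int, FrameAndIterHash>& states,
    uint64 frame_id, int64_t iter_id) {
  auto it = states.find(FrameAndIter(frame_id, iter_id));
  return it == states.end() ? -1 : it->second;
}
}  // namespace control_flow_example
}  // namespace tensorflow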
#include "tensorflow/core/graph/control_flow.h"
#include <deque>
#include <vector>
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
struct Frame {
string name;
Frame* parent = nullptr;
const Node* loop_cond = nullptr;
};
Status ValidateControlFlowInfo(const Graph* graph,
const std::vector<ControlFlowInfo>& cf_info) {
std::unordered_map<string, Frame> frames;
for (const Node* node : graph->op_nodes()) {
const ControlFlowInfo& cf = cf_info[node->id()];
if (!cf.frame || !cf.parent_frame) {
continue;
}
Frame& frame = frames[cf.frame_name];
Frame* parent = &frames[cf_info[cf.parent_frame->id()].frame_name];
if (frame.parent == nullptr) {
frame.parent = parent;
frame.name = cf.frame_name;
} else if (frame.parent != parent) {
return errors::Internal(
"Invalid loop structure: Mismatched parent frames for \"",
cf.frame_name, "\": \"", parent->name, "\" vs \"", frame.parent->name,
"\". The node giving this error: ", FormatNodeForError(*node),
". This is an internal bug, please file a bug report with "
"instructions on how to reproduce the error.");
}
if (IsLoopCond(node)) {
if (frame.loop_cond &&
!absl::StrContains(frame.loop_cond->name(), "LoopCounter") &&
!absl::StrContains(node->name(), "LoopCounter")) {
return errors::InvalidArgument(
"Invalid loop structure: Loop \"", cf.frame_name,
"\" has more than one LoopCond node: ", FormatNodeForError(*node),
" and ", FormatNodeForError(*frame.loop_cond),
". This is an internal bug, please file a bug report with "
"instructions on how to reproduce the error.");
}
frame.loop_cond = node;
}
}
return absl::OkStatus();
}
}
Status BuildControlFlowInfo(const Graph* g, std::vector<ControlFlowInfo>* info,
std::vector<string>* unreachable_nodes) {
info->clear();
info->resize(g->num_node_ids());
std::vector<const Node*> parent_nodes;
parent_nodes.resize(g->num_node_ids());
const Node* src_node = g->source_node();
ControlFlowInfo& src_info = (*info)[src_node->id()];
src_info.frame = src_node;
src_info.parent_frame = src_node;
string frame_name;
std::deque<const Node*> ready;
ready.push_back(src_node);
while (!ready.empty()) {
const Node* curr_node = ready.front();
ready.pop_front();
const ControlFlowInfo& curr_info = (*info)[curr_node->id()];
const Node* frame = curr_info.frame;
const Node* parent = curr_info.parent_frame;
frame_name = curr_info.frame_name;
    if (IsExit(curr_node)) {
      // An Exit node leaves the current frame, so its successors belong to the
      // parent frame.
      const ControlFlowInfo& parent_info = (*info)[parent->id()];
      frame = parent_info.frame;
      parent = parent_info.parent_frame;
      frame_name = parent_info.frame_name;
    }
for (const Edge* out_edge : curr_node->out_edges()) {
const Node* out = out_edge->dst();
int out_id = out->id();
ControlFlowInfo* out_info = &(*info)[out_id];
const Node* out_parent = out_info->parent_frame;
bool is_visited = (parent_nodes[out_id] != nullptr);
if (!out->IsOp()) continue;
if (!is_visited) {
parent_nodes[out->id()] = curr_node;
ready.push_back(out);
}
if (IsEnter(out)) {
if (is_visited) {
const string& parent_frame = (*info)[out_parent->id()].frame_name;
if (parent_frame != frame_name) {
return errors::InvalidArgument(
FormatNodeForError(*out),
" has inputs from different frames. The input ",
FormatNodeForError(*curr_node), " is in frame '", frame_name,
"'. The input ", FormatNodeForError(*parent_nodes[out->id()]),
" is in frame '", parent_frame, "'.");
}
} else {
out_info->frame = out;
out_info->parent_frame = frame;
TF_RETURN_IF_ERROR(
GetNodeAttr(out->attrs(), "frame_name", &out_info->frame_name));
if (out_info->frame_name.empty()) {
return errors::InvalidArgument("The Enter ",
FormatNodeForError(*out),
" must have a frame name.");
}
}
} else {
if (is_visited) {
if (out_info->frame_name != frame_name) {
return errors::InvalidArgument(
FormatNodeForError(*out),
" has inputs from different frames. The input ",
FormatNodeForError(*curr_node), " is in frame '", frame_name,
"'. The input ", FormatNodeForError(*parent_nodes[out->id()]),
" is in frame '", out_info->frame_name, "'.");
}
} else {
out_info->frame = frame;
out_info->parent_frame = parent;
out_info->frame_name = frame_name;
}
}
}
}
if (unreachable_nodes) {
for (const Node* node : g->op_nodes()) {
if (!parent_nodes[node->id()]) {
unreachable_nodes->push_back(node->name());
}
}
}
TF_RETURN_IF_ERROR(ValidateControlFlowInfo(g, *info));
return absl::OkStatus();
}
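// Illustrative sketch (hypothetical helper, not TensorFlow API): the typical
// call pattern for BuildControlFlowInfo, here also collecting the optional
// list of nodes unreachable from the source node.
namespace control_flow_example {
inline Status LogUnreachableNodes(const Graph* g) {
  std::vector<ControlFlowInfo> info;
  std::vector<string> unreachable;
  TF_RETURN_IF_ERROR(BuildControlFlowInfo(g, &info, &unreachable));
  for (const string& name : unreachable) {
    LOG(INFO) << "Unreachable from source: " << name;
  }
  return absl::OkStatus();
}
}  // namespace control_flow_example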
} | #include "tensorflow/core/graph/control_flow.h"
#include <string>
#include <vector>
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/while_loop.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
Status LessThanTenCond(const Scope& scope, const std::vector<Output>& inputs,
Output* output) {
*output = ops::Less(scope, inputs[0], 10);
return scope.status();
}
Status AddOneBody(const Scope& scope, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(scope, {inputs[0], 1}));
return scope.status();
}
Status NestedLoopBody(const Scope& scope, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
return ops::BuildWhileLoop(scope.NewSubScope("inner"), inputs,
LessThanTenCond, AddOneBody, "inner_loop",
outputs);
}
TEST(ValidateControlFlowTest, InputsFromDifferentFrames) {
Scope scope = Scope::NewRootScope().ExitOnError();
std::vector<Output> inputs;
inputs.push_back(ops::Placeholder(scope, DT_INT32));
std::vector<Output> outputs;
TF_ASSERT_OK(ops::BuildWhileLoop(scope.NewSubScope("outer"), inputs,
LessThanTenCond, NestedLoopBody,
"outer_loop", &outputs));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<ControlFlowInfo> info;
Status status = BuildControlFlowInfo(graph.get(), &info);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(
absl::StrContains(status.message(), "has inputs from different frames"))
<< status.message();
EXPECT_TRUE(
absl::StrContains(status.message(), "{{node outer/body/inner/Merge}}"))
<< status.message();
EXPECT_TRUE(
absl::StrContains(status.message(), "{{node outer/body/inner/Enter}}"))
<< status.message();
EXPECT_TRUE(absl::StrContains(status.message(), "{{node outer/Switch}}"))
<< status.message();
}
TEST(ValidateControlFlowTest, MismatchedParentFrames) {
Scope scope = Scope::NewRootScope().ExitOnError();
std::vector<Output> inputs;
inputs.push_back(ops::Placeholder(scope, DT_INT32));
std::vector<Output> outputs;
TF_ASSERT_OK(ops::BuildWhileLoop(scope, inputs, LessThanTenCond, AddOneBody,
"test_loop", &outputs));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
Node* enter_1 = nullptr;
for (Node* node : graph->op_nodes()) {
if (IsEnter(node)) {
enter_1 = node;
}
}
ASSERT_TRUE(enter_1 != nullptr);
NodeDef enter;
enter.set_name("Enter2");
enter.set_op("Enter");
(*enter.mutable_attr())["T"].set_type(DT_INT32);
(*enter.mutable_attr())["frame_name"].set_s("test_loop");
*enter.add_input() = "Enter";
Status status;
Node* enter_2 = graph->AddNode(enter, &status);
TF_ASSERT_OK(status);
graph->AddControlEdge(enter_1, enter_2);
std::vector<ControlFlowInfo> info;
status = BuildControlFlowInfo(graph.get(), &info);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(absl::StrContains(status.message(), "Mismatched parent frames"))
<< status.message();
EXPECT_TRUE(absl::StrContains(status.message(), "{{node Enter2}}"))
<< status.message();
}
TEST(ValidateControlFlowTest, TwoLoopCond) {
Scope scope = Scope::NewRootScope().ExitOnError();
std::vector<Output> inputs;
inputs.push_back(ops::Placeholder(scope, DT_INT32));
std::vector<Output> outputs;
TF_ASSERT_OK(ops::BuildWhileLoop(scope, inputs, LessThanTenCond, AddOneBody,
"test_loop", &outputs));
outputs.clear();
TF_ASSERT_OK(ops::BuildWhileLoop(scope.NewSubScope("sub"), inputs,
LessThanTenCond, AddOneBody, "test_loop",
&outputs, false));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<ControlFlowInfo> info;
Status status = BuildControlFlowInfo(graph.get(), &info);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(
absl::StrContains(status.message(), "more than one LoopCond node"))
<< status.message();
EXPECT_TRUE(absl::StrContains(status.message(), "{{node sub/LoopCond}}"))
<< status.message();
EXPECT_TRUE(absl::StrContains(status.message(), "{{node LoopCond}}"))
<< status.message();
}
}
} |
1,432 | cpp | tensorflow/tensorflow | node_builder | tensorflow/core/graph/node_builder.cc | tensorflow/core/graph/node_builder_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_NODE_BUILDER_H_
#define TENSORFLOW_CORE_GRAPH_NODE_BUILDER_H_
#include <vector>
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
namespace tensorflow {
class NodeBuilder {
public:
struct NodeOut {
NodeOut(Node* n, int32_t i = 0);
NodeOut(OutputTensor t);
NodeOut(StringPiece name, int32_t i, DataType t);
NodeOut();
Node* node;
bool error;
string name;
int32 index;
DataType dt;
};
NodeBuilder(StringPiece name, StringPiece op_name,
const OpRegistryInterface* op_registry = OpRegistry::Global(),
const NodeDebugInfo* debug = nullptr);
NodeBuilder(StringPiece name, const OpDef* op_def);
NodeBuilder(const NodeDefBuilder& def_builder);
NodeBuilder& Input(Node* src_node, int src_index = 0);
NodeBuilder& Input(NodeOut src);
NodeBuilder& Input(absl::Span<const NodeOut> src_list);
NodeBuilder& ControlInput(Node* src_node);
NodeBuilder& ControlInputs(absl::Span<Node* const> src_nodes);
NodeBuilder& Device(StringPiece device_spec);
NodeBuilder& AssignedDevice(StringPiece device);
NodeBuilder& XlaCluster(StringPiece xla_cluster);
template <class T>
NodeBuilder& Attr(StringPiece attr_name, T&& value);
template <class T>
NodeBuilder& Attr(StringPiece attr_name, std::initializer_list<T> value);
Status Finalize(Graph* graph, Node** created_node, bool consume = false);
absl::StatusOr<Node*> Finalize(Graph* graph, bool consume = false);
const string& node_name() const { return def_builder_.node_name(); }
const OpDef& op_def() const { return def_builder_.op_def(); }
private:
static DataType SafeGetOutput(const Node* node, int i, bool* error) {
if (node != nullptr && i >= 0 && i < node->num_outputs()) {
*error = false;
return node->output_type(i);
} else {
*error = true;
return DT_FLOAT;
}
}
void AddIndexError(const Node* node, int i);
bool GetOutputType(const Node* node, int i, DataType* dt);
NodeDefBuilder def_builder_;
const OpRegistryInterface* op_registry_;
std::vector<NodeOut> inputs_;
std::vector<Node*> control_inputs_;
std::vector<string> errors_;
string assigned_device_;
};
template <class T>
NodeBuilder& NodeBuilder::Attr(StringPiece attr_name, T&& value) {
def_builder_.Attr(attr_name, std::forward<T>(value));
return *this;
}
template <class T>
NodeBuilder& NodeBuilder::Attr(StringPiece attr_name,
std::initializer_list<T> value) {
def_builder_.Attr(attr_name, value);
return *this;
}
}
#endif
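// Illustrative usage sketch (hypothetical helper and node name, not TensorFlow
// API): constructs a "NoOp" node with NodeBuilder and finalizes it into a Graph.
namespace tensorflow {
namespace node_builder_example {
inline Status AddNoOpNode(Graph* graph, Node** out) {
  return NodeBuilder("example_noop", "NoOp").Finalize(graph, out);
}
}  // namespace node_builder_example
}  // namespace tensorflow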
#include "tensorflow/core/graph/node_builder.h"
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
NodeBuilder::NodeOut::NodeOut(Node* n, int32_t i)
: node(n),
error(false),
name(node != nullptr ? node->name() : (error = true, "")),
index(i),
dt(SafeGetOutput(node, i, &error)) {}
NodeBuilder::NodeOut::NodeOut(OutputTensor t) : NodeOut(t.node, t.index) {}
NodeBuilder::NodeOut::NodeOut(StringPiece n, int32_t i, DataType t)
: node(nullptr), error(false), name(n), index(i), dt(t) {}
NodeBuilder::NodeOut::NodeOut()
: node(nullptr), error(true), index(0), dt(DT_FLOAT) {}
NodeBuilder::NodeBuilder(StringPiece name, StringPiece op_name,
const OpRegistryInterface* op_registry,
const NodeDebugInfo* debug)
: def_builder_(name, op_name, op_registry, debug) {}
NodeBuilder::NodeBuilder(StringPiece name, const OpDef* op_def)
: def_builder_(name, op_def) {}
NodeBuilder::NodeBuilder(const NodeDefBuilder& def_builder)
: def_builder_(def_builder) {}
NodeBuilder& NodeBuilder::Input(Node* src_node, int src_index) {
inputs_.emplace_back(src_node, src_index);
DataType dt;
if (GetOutputType(src_node, src_index, &dt)) {
def_builder_.Input(src_node->name(), src_index, dt);
}
return *this;
}
NodeBuilder& NodeBuilder::Input(NodeOut src) {
if (src.error) {
AddIndexError(src.node, src.index);
} else {
inputs_.emplace_back(src.node, src.index);
def_builder_.Input(src.name, src.index, src.dt);
}
return *this;
}
NodeBuilder& NodeBuilder::Input(absl::Span<const NodeOut> src_list) {
std::vector<NodeDefBuilder::NodeOut> srcs;
srcs.reserve(src_list.size());
for (const auto& node_out : src_list) {
if (node_out.error) {
AddIndexError(node_out.node, node_out.index);
} else {
srcs.emplace_back(node_out.name, node_out.index, node_out.dt);
inputs_.emplace_back(node_out.node, node_out.index);
}
}
def_builder_.Input(absl::Span<const NodeDefBuilder::NodeOut>(srcs));
return *this;
}
NodeBuilder& NodeBuilder::ControlInput(Node* src_node) {
control_inputs_.emplace_back(src_node);
def_builder_.ControlInput(src_node->name());
return *this;
}
NodeBuilder& NodeBuilder::ControlInputs(absl::Span<Node* const> src_nodes) {
control_inputs_.insert(control_inputs_.end(), src_nodes.begin(),
src_nodes.end());
for (const Node* src_node : src_nodes) {
def_builder_.ControlInput(src_node->name());
}
return *this;
}
NodeBuilder& NodeBuilder::Device(StringPiece device_spec) {
def_builder_.Device(device_spec);
return *this;
}
NodeBuilder& NodeBuilder::AssignedDevice(StringPiece device) {
assigned_device_ = string(device);
return *this;
}
NodeBuilder& NodeBuilder::XlaCluster(StringPiece xla_cluster) {
def_builder_.Attr("_XlaCluster", xla_cluster);
return *this;
}
absl::StatusOr<Node*> NodeBuilder::Finalize(Graph* graph, bool consume) {
Node* out;
TF_RETURN_IF_ERROR(Finalize(graph, &out, consume));
return out;
}
Status NodeBuilder::Finalize(Graph* graph, Node** created_node, bool consume) {
if (created_node != nullptr) {
*created_node = nullptr;
}
if (!errors_.empty()) {
return errors::InvalidArgument(absl::StrJoin(errors_, "\n"));
}
NodeDef node_def;
TF_RETURN_IF_ERROR(def_builder_.Finalize(&node_def, consume));
TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, def_builder_.op_def()));
TF_RETURN_IF_ERROR(
CheckOpDeprecation(def_builder_.op_def(), graph->versions().producer()));
TF_ASSIGN_OR_RETURN(Node * node, graph->AddNode(std::move(node_def)));
node->set_assigned_device_name(assigned_device_);
for (size_t i = 0; i < inputs_.size(); ++i) {
if (inputs_[i].node != nullptr) {
graph->AddEdge(inputs_[i].node, inputs_[i].index, node, i);
}
}
for (Node* control_input : control_inputs_) {
graph->AddControlEdge(control_input, node);
}
if (created_node != nullptr) *created_node = node;
return absl::OkStatus();
}
void NodeBuilder::AddIndexError(const Node* node, int i) {
if (node == nullptr) {
errors_.emplace_back(
strings::StrCat("Attempt to add nullptr Node to node with type ",
def_builder_.op_def().name()));
} else {
errors_.emplace_back(strings::StrCat(
"Attempt to add output ", i, " of ", node->name(), " not in range [0, ",
node->num_outputs(), ") to node with type ",
def_builder_.op_def().name(), ". Node: ", FormatNodeForError(*node)));
}
}
bool NodeBuilder::GetOutputType(const Node* node, int i, DataType* dt) {
bool error;
*dt = SafeGetOutput(node, i, &error);
if (error) AddIndexError(node, i);
return !error;
}
} | #include "tensorflow/core/graph/node_builder.h"
#include <string>
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
REGISTER_OP("Source").Output("o: out_types").Attr("out_types: list(type)");
REGISTER_OP("Sink").Input("i: T").Attr("T: type");
TEST(NodeBuilderTest, Simple) {
Graph graph(OpRegistry::Global());
Node* source_node;
TF_EXPECT_OK(NodeBuilder("source_op", "Source")
.Attr("out_types", {DT_INT32, DT_STRING})
.Finalize(&graph, &source_node));
ASSERT_TRUE(source_node != nullptr);
TF_EXPECT_OK(NodeBuilder("sink1", "Sink")
.Input(source_node)
.Finalize(&graph, nullptr));
TF_EXPECT_OK(NodeBuilder("sink2", "Sink")
.Input(source_node, 1)
.Finalize(&graph, nullptr));
EXPECT_FALSE(NodeBuilder("sink3", "Sink")
.Input(source_node, 2)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink4", "Sink")
.Input(source_node, -1)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink5", "Sink")
.Input({source_node, -1})
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink6", "Sink")
.Input(nullptr)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink7", "Sink")
.Input(NodeBuilder::NodeOut(nullptr, 0))
.Finalize(&graph, nullptr)
.ok());
}
REGISTER_OP("FullTypeOpBasicType")
.Output("o1: out_type")
.Attr("out_type: type")
.SetTypeConstructor([](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(TFT_ARRAY);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_VAR);
arg->set_s("out_type");
return absl::OkStatus();
});
TEST(NodeBuilderTest, TypeConstructorBasicType) {
Graph graph(OpRegistry::Global());
Node* node;
TF_EXPECT_OK(NodeBuilder("op", "FullTypeOpBasicType")
.Attr("out_type", DT_FLOAT)
.Finalize(&graph, &node));
ASSERT_TRUE(node->def().has_experimental_type());
const FullTypeDef& ft = node->def().experimental_type();
ASSERT_EQ(ft.type_id(), TFT_PRODUCT);
ASSERT_EQ(ft.args_size(), 1);
auto ot = ft.args(0);
ASSERT_EQ(ot.type_id(), TFT_ARRAY);
ASSERT_EQ(ot.args(0).type_id(), TFT_FLOAT);
ASSERT_EQ(ot.args(0).args().size(), 0);
}
REGISTER_OP("FullTypeOpListType")
.Output("o1: out_types")
.Attr("out_types: list(type)")
.SetTypeConstructor([](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(TFT_ARRAY);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_VAR);
arg->set_s("out_types");
return absl::OkStatus();
});
TEST(NodeBuilderTest, TypeConstructorListType) {
Graph graph(OpRegistry::Global());
Node* node;
ASSERT_FALSE(NodeBuilder("op", "FullTypeOpListType")
.Attr("out_types", {DT_FLOAT, DT_INT32})
.Finalize(&graph, &node)
.ok());
}
}
} |
1,433 | cpp | tensorflow/tensorflow | validate | tensorflow/core/graph/validate.cc | tensorflow/core/graph/validate_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_VALIDATE_H_
#define TENSORFLOW_CORE_GRAPH_VALIDATE_H_
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace graph {
Status ValidateGraphDef(const GraphDef& graph_def,
const OpRegistryInterface& op_registry);
Status ValidateGraphDefAgainstOpRegistry(
const GraphDef& graph_def, const OpRegistryInterface& op_registry);
Status ValidateGraphDefAgainstOpList(const GraphDef& graph_def,
const OpList& op_list);
void GetOpListForValidation(
OpList* op_list, const OpRegistry& op_registry = *OpRegistry::Global());
Status ValidateGraphHasNoCycle(const Graph& graph);
Status VerifyNoDuplicateNodeNames(const GraphDef& graph);
}
}
#endif
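// Illustrative usage sketch (hypothetical helper, not TensorFlow API): checks a
// GraphDef for duplicate node names and then validates it against the global
// op registry, with default attrs filled in first.
namespace tensorflow {
namespace validate_example {
inline Status CheckGraphDef(const GraphDef& graph_def) {
  Status s = graph::VerifyNoDuplicateNodeNames(graph_def);
  if (!s.ok()) return s;
  return graph::ValidateGraphDefAgainstOpRegistry(graph_def,
                                                  *OpRegistry::Global());
}
}  // namespace validate_example
}  // namespace tensorflow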
#include "tensorflow/core/graph/validate.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace graph {
Status ValidateGraphDef(const GraphDef& graph_def,
const OpRegistryInterface& op_registry) {
Status s;
const int version = graph_def.versions().producer();
for (const NodeDef& node_def : graph_def.node()) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(op_registry.LookUpOpDef(node_def.op(), &op_def));
TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, *op_def));
TF_RETURN_IF_ERROR(CheckOpDeprecation(*op_def, version));
}
return s;
}
Status ValidateGraphDefAgainstOpRegistry(
const GraphDef& graph_def, const OpRegistryInterface& op_registry) {
GraphDef copy(graph_def);
TF_RETURN_IF_ERROR(AddDefaultAttrsToGraphDef(©, op_registry, 0));
return ValidateGraphDef(copy, op_registry);
}
Status ValidateGraphDefAgainstOpList(const GraphDef& graph_def,
const OpList& op_list) {
OpListOpRegistry registry(&op_list);
return ValidateGraphDefAgainstOpRegistry(graph_def, registry);
}
void GetOpListForValidation(OpList* op_list, const OpRegistry& op_registry) {
op_registry.Export(false, op_list);
RemoveDescriptionsFromOpList(op_list);
}
Status ValidateGraphHasNoCycle(const Graph& graph) {
  // Kahn-style topological sort: nodes whose inputs are all satisfied go on
  // the ready list; the graph is acyclic iff every node gets processed.
  std::vector<const Node*> ready;
  std::vector<int> pending_count(graph.num_node_ids(), 0);
for (int i = 0; i < graph.num_node_ids(); ++i) {
const Node* n = graph.FindNodeId(i);
if (n == nullptr) continue;
pending_count[i] = n->in_edges().size();
    if (n->IsMerge()) {
      // A Merge fed by a NextIteration back edge legitimately forms a cycle in
      // while loops, so that edge is not counted as pending.
      for (const Edge* e : n->in_edges()) {
        if (!e->IsControlEdge() && e->src()->IsNextIteration()) {
          pending_count[i]--;
        }
      }
    }
if (pending_count[i] == 0) {
ready.push_back(n);
}
}
int processed = 0;
while (!ready.empty()) {
const Node* node = ready.back();
ready.pop_back();
++processed;
for (const Edge* out : node->out_edges()) {
const int output_id = out->dst()->id();
pending_count[output_id]--;
if (pending_count[output_id] == 0) {
ready.push_back(out->dst());
}
}
}
if (processed < graph.num_nodes()) {
std::vector<string> nodes_in_cycle;
for (int i = 0; i < pending_count.size() && nodes_in_cycle.size() < 3;
++i) {
if (pending_count[i] != 0) {
nodes_in_cycle.push_back(graph.FindNodeId(i)->name());
}
}
return errors::InvalidArgument(
"Graph is invalid, contains a cycle with ",
graph.num_nodes() - processed,
" nodes, including: ", absl::StrJoin(nodes_in_cycle, ", "));
}
return absl::OkStatus();
}
Status VerifyNoDuplicateNodeNames(const GraphDef& graph) {
absl::flat_hash_set<absl::string_view> nodes;
for (const auto& node : graph.node()) {
if (nodes.contains(node.name())) {
return errors::AlreadyExists("Node already exists: ", node.name());
}
nodes.insert(node.name());
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/graph/validate.h"
#include <string>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
REGISTER_OP("FloatInput").Output("o: float");
REGISTER_OP("Int32Input").Output("o: int32");
TEST(ValidateGraphDefTest, TestValidGraph) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'B' op: 'FloatInput' }"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
TF_ASSERT_OK(graph::ValidateGraphDef(graph_def, *OpRegistry::Global()));
}
TEST(ValidateGraphDefTest, GraphWithUnspecifiedDefaultAttr) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'B' op: 'Int32Input' }"
"node { "
" name: 'C' op: 'Sum' "
" attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] "
"}";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
Status s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global());
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "NodeDef missing attr"));
TF_ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, *OpRegistry::Global(), 0));
TF_ASSERT_OK(graph::ValidateGraphDef(graph_def, *OpRegistry::Global()));
}
TEST(ValidateGraphDefTest, GraphWithUnspecifiedRequiredAttr) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { "
" name: 'B' op: 'Cast' "
" attr { key: 'SrcT' value { type: DT_FLOAT } }"
" input: ['A'] "
"}";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
Status s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global());
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "NodeDef missing attr"));
TF_ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, *OpRegistry::Global(), 0));
s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global());
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "NodeDef missing attr"));
}
TEST(ValidateGraphDefAgainstOpListTest, GraphWithOpOnlyInOpList) {
OpRegistrationData op_reg_data;
TF_ASSERT_OK(OpDefBuilder("UniqueSnowflake").Finalize(&op_reg_data));
OpList op_list;
*op_list.add_op() = op_reg_data.op_def;
const string graph_def_str = "node { name: 'A' op: 'UniqueSnowflake' }";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
TF_ASSERT_OK(graph::ValidateGraphDefAgainstOpList(graph_def, op_list));
}
TEST(ValidateGraphDefAgainstOpListTest, GraphWithGlobalOpNotInOpList) {
OpRegistrationData op_reg_data;
TF_ASSERT_OK(OpDefBuilder("NotAnywhere").Finalize(&op_reg_data));
OpList op_list;
*op_list.add_op() = op_reg_data.op_def;
const string graph_def_str = "node { name: 'A' op: 'FloatInput' }";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
ASSERT_FALSE(graph::ValidateGraphDefAgainstOpList(graph_def, op_list).ok());
}
REGISTER_OP("HasDocs").Doc("This is in the summary.");
TEST(GetOpListForValidationTest, ShouldStripDocs) {
bool found_float = false;
bool found_int32 = false;
bool found_has_docs = false;
OpList op_list;
graph::GetOpListForValidation(&op_list);
for (const OpDef& op_def : op_list.op()) {
if (op_def.name() == "FloatInput") {
EXPECT_FALSE(found_float);
found_float = true;
}
if (op_def.name() == "Int32Input") {
EXPECT_FALSE(found_int32);
found_int32 = true;
}
if (op_def.name() == "HasDocs") {
EXPECT_FALSE(found_has_docs);
found_has_docs = true;
EXPECT_TRUE(op_def.summary().empty());
}
}
EXPECT_TRUE(found_float);
EXPECT_TRUE(found_int32);
EXPECT_TRUE(found_has_docs);
}
TEST(VerifyNoDuplicateNodeNames, NoDuplicateNodeNames) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'B' op: 'Int32Input' }"
"node { "
" name: 'C' op: 'Sum' "
" attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] "
"}";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
TF_ASSERT_OK(graph::VerifyNoDuplicateNodeNames(graph_def));
}
TEST(VerifyNoDuplicateNodeNames, DuplicateNodeNames) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'A' op: 'Int32Input' }"
"node { "
" name: 'C' op: 'Sum' "
" attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'A'] "
"}";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
EXPECT_EQ(graph::VerifyNoDuplicateNodeNames(graph_def).code(),
tensorflow::error::ALREADY_EXISTS);
}
TEST(ValidateGraphHasNoCycleTest, NoCyclePasses) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'B' op: 'FloatInput' }"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
TF_EXPECT_OK(graph::ValidateGraphHasNoCycle(graph));
}
TEST(ValidateGraphHasNoCycleTest, NoCycleWithMergePasses) {
const string graph_def_str =
R"EOF(
node { name: 'A' op: 'FloatInput' }
node { name: 'merge' op: 'Merge' input: [ 'A:0', 'next:0' ]
attr { key: "N" value: { i: 2 } }
attr { key: "T" value: { type: DT_FLOAT } } }
node { name: 'B' op: 'Mul'
attr { key: 'T' value { type: DT_FLOAT } }
input: [ 'merge:0', 'merge:0' ] }
node { name: 'next' op: 'NextIteration' input: ['B:0']
attr { key: "T" value: { type: DT_FLOAT } } }
)EOF";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
TF_EXPECT_OK(graph::ValidateGraphHasNoCycle(graph));
}
Node* AddNodeFromNodeDef(Graph& graph, const string& name,
const string& node_type, int num_inputs) {
auto builder = NodeDefBuilder(name, node_type);
for (int i = 0; i < num_inputs; ++i) {
builder = builder.Input(strings::StrCat("node_", i), i, DT_FLOAT);
}
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph.AddNode(node_def, &s);
TF_CHECK_OK(s);
return node;
}
TEST(ValidateGraphHasNoCycleTest, CycleFails) {
Graph graph(OpRegistry::Global());
Node* a = AddNodeFromNodeDef(graph, "A", "FloatInput", 0);
Node* c = AddNodeFromNodeDef(graph, "B", "Mul", 2);
graph.AddEdge(a, 0, c, 0);
graph.AddEdge(c, 0, c, 1);
EXPECT_THAT(
graph::ValidateGraphHasNoCycle(graph),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Graph is invalid, contains a cycle")));
}
}
} |
1,434 | cpp | tensorflow/tensorflow | graph_def_builder | tensorflow/core/graph/graph_def_builder.cc | tensorflow/core/graph/graph_def_builder_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_GRAPH_DEF_BUILDER_H_
#define TENSORFLOW_CORE_GRAPH_GRAPH_DEF_BUILDER_H_
#include <string>
#include <vector>
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
namespace tensorflow {
class GraphDefBuilder {
public:
class Options {
public:
Options(Graph* graph, Status* status);
~Options();
Options WithName(StringPiece name) const;
Options WithDevice(StringPiece device) const;
Options WithControlInput(Node* control_input) const;
Options WithControlInputs(absl::Span<Node* const> control_inputs) const;
template <class T>
Options WithAttr(StringPiece attr_name, T&& value) const {
return Options(*this).WithAttrImpl(attr_name, std::forward<T>(value));
}
template <class T>
Options WithAttr(StringPiece attr_name,
std::initializer_list<T> value) const {
return WithAttr<std::initializer_list<T>>(attr_name, std::move(value));
}
bool HaveError() const { return status_ != nullptr && !status_->ok(); }
string StatusToString() const {
return status_->ok() ? "OK" : std::string(status_->message());
}
string GetNameForOp(StringPiece op) const;
Node* FinalizeBuilder(NodeBuilder* builder) const;
void UpdateStatus(const Status& status) const;
const OpRegistryInterface* op_registry() const {
return graph_->op_registry();
}
private:
Options WithNameImpl(StringPiece name);
Options WithDeviceImpl(StringPiece device);
Options WithControlInputImpl(Node* control_input);
Options WithControlInputsImpl(absl::Span<Node* const> control_inputs);
template <class T>
Options WithAttrImpl(StringPiece name, T&& value) {
attrs_.emplace_back(string(name), AttrValue());
SetAttrValue(std::forward<T>(value), &attrs_.back().second);
return *this;
}
Graph* const graph_;
Status* const status_;
string name_;
string device_;
std::vector<Node*> control_inputs_;
std::vector<std::pair<string, AttrValue>> attrs_;
};
explicit GraphDefBuilder(
const OpRegistryInterface* op_registry = OpRegistry::Global())
: graph_(op_registry), flib_def_(op_registry), opts_(&graph_, &status_) {}
enum TestFailImmediatelyType { kFailImmediately };
explicit GraphDefBuilder(
TestFailImmediatelyType,
const OpRegistryInterface* op_registry = OpRegistry::Global())
: graph_(op_registry), flib_def_(op_registry), opts_(&graph_, nullptr) {}
const Options& opts() const { return opts_; }
Status ToGraphDef(GraphDef* graph_def) const;
Status AddFunctionLibrary(const FunctionDefLibrary& fdef_lib) {
return flib_def_.AddLibrary(fdef_lib);
}
bool HasFunction(const string& name) {
return flib_def_.Find(name) != nullptr;
}
private:
Graph graph_;
FunctionLibraryDefinition flib_def_;
Status status_;
Options opts_;
};
namespace ops {
typedef NodeBuilder::NodeOut NodeOut;
Node* SourceOp(const string& op_name, const GraphDefBuilder::Options& opts);
Node* UnaryOp(const string& op_name, NodeOut input,
const GraphDefBuilder::Options& opts);
Node* BinaryOp(const string& op_name, NodeOut a, NodeOut b,
const GraphDefBuilder::Options& opts);
Node* TernaryOp(const string& op_name, NodeOut a, NodeOut b, NodeOut c,
const GraphDefBuilder::Options& opts);
}
}
#endif
#include "tensorflow/core/graph/graph_def_builder.h"
#include <utility>
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
GraphDefBuilder::Options::Options(Graph* graph, Status* status)
: graph_(graph), status_(status) {}
GraphDefBuilder::Options::~Options() {}
GraphDefBuilder::Options GraphDefBuilder::Options::WithName(
StringPiece name) const {
return Options(*this).WithNameImpl(name);
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithDevice(
StringPiece device) const {
return Options(*this).WithDeviceImpl(device);
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithControlInput(
Node* control_input) const {
return Options(*this).WithControlInputImpl(control_input);
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithControlInputs(
absl::Span<Node* const> control_inputs) const {
return Options(*this).WithControlInputsImpl(control_inputs);
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithNameImpl(
StringPiece name) {
name_ = string(name);
return *this;
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithDeviceImpl(
StringPiece device) {
device_ = string(device);
return *this;
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithControlInputImpl(
Node* control_input) {
control_inputs_.push_back(control_input);
return *this;
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithControlInputsImpl(
absl::Span<Node* const> control_inputs) {
control_inputs_.insert(control_inputs_.end(), control_inputs.begin(),
control_inputs.end());
return *this;
}
Status GraphDefBuilder::ToGraphDef(GraphDef* graph_def) const {
if (status_.ok()) {
graph_.ToGraphDef(graph_def);
*graph_def->mutable_library() = flib_def_.ToProto();
}
return status_;
}
string GraphDefBuilder::Options::GetNameForOp(StringPiece op) const {
if (name_.empty()) return graph_->NewName(op);
return name_;
}
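// Applies the accumulated control inputs, device, and attrs to `builder`,
// finalizes it into the graph, and reports any resulting error through
// UpdateStatus.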
Node* GraphDefBuilder::Options::FinalizeBuilder(NodeBuilder* builder) const {
builder->ControlInputs(control_inputs_);
if (!device_.empty()) builder->Device(device_);
for (const auto& attr : attrs_) {
builder->Attr(attr.first, attr.second);
}
Node* returned_node;
UpdateStatus(builder->Finalize(graph_, &returned_node));
return returned_node;
}
void GraphDefBuilder::Options::UpdateStatus(const Status& status) const {
if (status_ == nullptr) {
TF_CHECK_OK(status);
} else {
status_->Update(status);
}
}
namespace ops {
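// Helpers that build nodes with zero, one, two, or three data inputs. Each
// short-circuits to nullptr if an earlier op already recorded an error.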
Node* SourceOp(const string& op_name, const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,
opts.op_registry());
return opts.FinalizeBuilder(&node_builder);
}
Node* UnaryOp(const string& op_name, NodeOut input,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,
opts.op_registry());
node_builder.Input(std::move(input));
return opts.FinalizeBuilder(&node_builder);
}
Node* BinaryOp(const string& op_name, NodeOut a, NodeOut b,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,
opts.op_registry());
node_builder.Input(std::move(a)).Input(std::move(b));
return opts.FinalizeBuilder(&node_builder);
}
Node* TernaryOp(const string& op_name, NodeOut a, NodeOut b, NodeOut c,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,
opts.op_registry());
node_builder.Input(std::move(a)).Input(std::move(b)).Input(std::move(c));
return opts.FinalizeBuilder(&node_builder);
}
}
} | #include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
TEST(GraphDefBuilderTest, Version) {
ASSERT_LT(0, TF_GRAPH_DEF_VERSION);
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Graph graph(OpRegistry::Global());
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, &graph));
ASSERT_EQ(graph.versions().producer(), TF_GRAPH_DEF_VERSION);
ASSERT_EQ(graph.versions().min_consumer(), TF_GRAPH_DEF_VERSION_MIN_CONSUMER);
GraphDef graph_def;
TF_EXPECT_OK(builder.ToGraphDef(&graph_def));
ASSERT_EQ(graph_def.versions().producer(), TF_GRAPH_DEF_VERSION);
ASSERT_EQ(graph_def.versions().min_consumer(),
TF_GRAPH_DEF_VERSION_MIN_CONSUMER);
}
}
} |
1,435 | cpp | tensorflow/tensorflow | costmodel | tensorflow/core/graph/costmodel.cc | tensorflow/core/graph/costmodel_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_COSTMODEL_H_
#define TENSORFLOW_CORE_GRAPH_COSTMODEL_H_
#include <set>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
typedef std::unordered_map<StringPiece, int32, StringPieceHasher>
NodeNameToCostIdMap;
class StepStats;
class CostModel {
public:
explicit CostModel(bool is_global) : is_global_(is_global) {
unknown_shape_.set_unknown_rank(true);
}
void SuppressInfrequent();
bool is_global() const { return is_global_; }
inline int Id(const Node* n) const {
if (is_global_) {
return n->cost_id();
} else {
return n->id();
}
}
inline int GlobalId(const Node* n, int offset) const {
if (is_global_) {
return n->cost_id();
} else {
return n->id() + offset;
}
}
void InitFromGraph(const Graph& g);
void MergeFromGlobal(const CostModel& cm);
void MergeFromLocal(const Graph& g, const CostModel& cm);
void MergeFromStats(const NodeNameToCostIdMap& map, const StepStats& ss);
void SetNumOutputs(const Node* node, int num_outputs);
void RecordCount(const Node* node, int num_count);
int32 TotalCount(const Node* node) const;
void RecordSize(const Node* node, int output_slot, Bytes bytes);
Bytes TotalBytes(const Node* node, int output_slot) const;
Bytes SizeEstimate(const Node* node, int output_slot) const;
void RecordTime(const Node* node, Microseconds time);
Microseconds TotalTime(const Node* node) const;
Microseconds TimeEstimate(const Node* node) const;
void CheckInitialized(const Graph& graph) const;
void RecordMaxMemorySize(const Node* node, int output_slot, Bytes bytes,
const TensorShapeProto& tensor_shape,
const DataType& dtype);
Bytes MaxMemorySize(const Node* node, int output_slot) const;
const TensorShapeProto& MaxMemoryShape(const Node* node,
int output_slot) const;
DataType MaxMemoryType(const Node* node, int output_slot) const;
Bytes TempMemorySize(const Node* node) const;
Bytes PersistentMemorySize(const Node* node) const;
void RecordMemoryStats(const Node* node, const MemoryStats& memory_stats);
void RecordMaxExecutionTime(const Node* node, Microseconds time);
Microseconds MaxExecutionTime(const Node* node) const;
void RecordAllocationId(const Node* node, int output_slot, int64_t alloc_id);
int64_t AllocationId(const Node* node, int output_slot) const;
bool IsPersistentTensor(const Node* node, int64_t alloc_id) const;
static Microseconds CopyTimeEstimate(Bytes b, double network_latency_millis,
double estimated_gbps);
static Microseconds ComputationTimeEstimate(int64_t mathops);
void AddToCostGraphDef(const Graph* graph, CostGraphDef* cost_graph) const;
void WriteSummaryToLog() const;
void IncrementUpdateTimes();
int32 GetUpdateTimes() const;
private:
static Bytes MinTensorMemoryUsage(const TensorShapeProto& tensor_shape,
const DataType& dtype);
const bool is_global_;
void Ensure(int id, int num_outputs);
int32 min_count_ = 0;
int32 update_times_ = 0;
std::vector<int32> count_;
std::vector<Microseconds> time_;
std::vector<absl::InlinedVector<Bytes, 2UL>> slot_bytes_;
std::vector<Microseconds> max_exec_time_;
struct MemUsage {
MemUsage() : temp_memory_size(0), persistent_memory_size(0) {}
Bytes temp_memory_size;
Bytes persistent_memory_size;
absl::InlinedVector<Bytes, 2UL> output_port_mem;
absl::InlinedVector<TensorShapeProto, 2UL> output_port_shape;
absl::InlinedVector<DataType, 2UL> output_port_type;
};
std::vector<MemUsage> max_mem_usage_;
std::vector<absl::InlinedVector<int64_t, 2UL>> output_port_alloc_ids_;
std::set<int64_t> persistent_alloc_ids_;
TensorShapeProto unknown_shape_;
CostModel(const CostModel&) = delete;
void operator=(const CostModel&) = delete;
};
}
#endif
#include "tensorflow/core/graph/costmodel.h"
#include <algorithm>
#include <vector>
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
const Microseconds kDefaultTimeEstimate(1);
const Microseconds kMinTimeEstimate(1);
}
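// Computes a minimum execution count (half the median of the non-zero counts)
// below which a node's statistics are treated as unreliable: SizeEstimate
// then reports zero bytes and TimeEstimate reports the minimum time.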
void CostModel::SuppressInfrequent() {
if (count_.empty()) return;
std::vector<int32> non_zero;
for (auto v : count_) {
if (v > 0) non_zero.push_back(v);
}
const size_t sz = non_zero.size();
if (sz > 0) {
std::nth_element(non_zero.begin(), non_zero.begin() + sz / 2,
non_zero.end());
int32_t median_value = non_zero[sz / 2];
min_count_ = median_value / 2;
VLOG(1) << "num non_zero vals: " << non_zero.size() << " median_value "
<< median_value;
} else {
min_count_ = 1;
}
}
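// Folds a per-graph cost model (indexed by node id) into this global one
// (indexed by cost id), summing counts, times, and per-slot byte totals.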
void CostModel::MergeFromLocal(const Graph& g, const CostModel& cm) {
CHECK(is_global_);
CHECK(!cm.is_global());
for (const Node* n : g.nodes()) {
const int local_id = cm.Id(n);
const int global_id = Id(n);
if (local_id < 0 || global_id < 0) continue;
int num_slots = cm.slot_bytes_[local_id].size();
Ensure(global_id, num_slots);
count_[global_id] += cm.count_[local_id];
time_[global_id] += cm.time_[local_id];
if (num_slots > 0) {
if (slot_bytes_[global_id].empty()) {
slot_bytes_[global_id].resize(num_slots);
} else {
CHECK_EQ(num_slots, slot_bytes_[global_id].size());
}
for (int s = 0; s < num_slots; ++s) {
auto& current_v = slot_bytes_[global_id][s];
auto other_v = cm.slot_bytes_[local_id][s];
if (current_v < 0) {
current_v = other_v;
} else if (other_v > 0) {
current_v += other_v;
}
}
}
}
}
void CostModel::MergeFromGlobal(const CostModel& cm) {
CHECK(is_global_);
CHECK_EQ(true, cm.is_global());
const int num_nodes = cm.count_.size();
for (int i = num_nodes - 1; i >= 0; --i) {
int num_slots = cm.slot_bytes_[i].size();
Ensure(i, num_slots);
count_[i] += cm.count_[i];
time_[i] += cm.time_[i];
if (num_slots > 0) {
if (slot_bytes_[i].empty()) {
slot_bytes_[i].resize(num_slots);
} else {
CHECK_EQ(num_slots, slot_bytes_[i].size());
}
for (int s = 0; s < num_slots; ++s) {
auto& current_v = slot_bytes_[i][s];
auto other_v = cm.slot_bytes_[i][s];
if (current_v < 0) {
current_v = other_v;
} else if (other_v > 0) {
current_v += other_v;
}
}
}
}
}
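// Accumulates execution counts, elapsed op times, and requested output bytes
// from a StepStats into this global model, mapping node names to cost ids
// through `map`.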
void CostModel::MergeFromStats(const NodeNameToCostIdMap& map,
const StepStats& ss) {
CHECK(is_global_);
for (auto& ds : ss.dev_stats()) {
for (auto& ns : ds.node_stats()) {
NodeNameToCostIdMap::const_iterator iter = map.find(ns.node_name());
if (iter == map.end()) continue;
int32_t global_id = iter->second;
Ensure(global_id, ns.output_size());
int64_t elapsed_micros =
ns.op_end_rel_micros() - ns.op_start_rel_micros();
count_[global_id]++;
time_[global_id] += elapsed_micros;
for (auto& no : ns.output()) {
int si = no.slot();
if (static_cast<size_t>(si) >= slot_bytes_[global_id].size()) {
slot_bytes_[global_id].resize(1 + si);
}
auto& current_v = slot_bytes_[global_id][si];
auto other_v =
no.tensor_description().allocation_description().requested_bytes();
if (current_v < 0) {
current_v = other_v;
} else if (other_v > 0) {
current_v += other_v;
}
}
}
}
}
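// Grows all per-node bookkeeping vectors so `id` is a valid index and sizes
// the per-output-slot entries to `num_outputs` with "unknown" defaults
// (-1 bytes, unknown shape, DT_INVALID).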
void CostModel::Ensure(int id, int num_outputs) {
if (slot_bytes_.size() <= static_cast<size_t>(id)) {
slot_bytes_.resize(id + 1);
count_.resize(id + 1);
time_.resize(id + 1);
max_mem_usage_.resize(id + 1);
max_exec_time_.resize(id + 1);
output_port_alloc_ids_.resize(id + 1);
}
if (num_outputs > 0) {
auto perslot = &slot_bytes_[id];
auto output_port_alloc_ids = &output_port_alloc_ids_[id];
auto max_mem_usage = &max_mem_usage_[id];
CHECK_LE(perslot->size(), num_outputs);
DCHECK_EQ(output_port_alloc_ids->size(), perslot->size());
DCHECK_EQ(max_mem_usage->output_port_mem.size(), perslot->size());
DCHECK_EQ(max_mem_usage->output_port_shape.size(), perslot->size());
DCHECK_EQ(max_mem_usage->output_port_type.size(), perslot->size());
perslot->resize(num_outputs, Bytes(-1));
output_port_alloc_ids->resize(num_outputs, -1);
max_mem_usage->output_port_mem.resize(num_outputs, Bytes(-1));
max_mem_usage->output_port_shape.resize(num_outputs, unknown_shape_);
max_mem_usage->output_port_type.resize(num_outputs, DT_INVALID);
}
}
void CostModel::SetNumOutputs(const Node* node, int num_outputs) {
const int id = Id(node);
if (id < 0) return;
Ensure(id, 0);
auto perslot = &slot_bytes_[id];
if (!perslot->empty()) {
CHECK_EQ(num_outputs, perslot->size())
<< "Cannot resize slot_bytes, node=" << node->name();
}
Ensure(id, num_outputs);
}
void CostModel::RecordCount(const Node* node, int count) {
const int id = Id(node);
if (id < 0) return;
CHECK_LT(id, slot_bytes_.size());
count_[id] += count;
}
int32 CostModel::TotalCount(const Node* node) const {
const int id = Id(node);
if (id < 0) return 0;
return (static_cast<size_t>(id) < slot_bytes_.size()) ? count_[id] : 0;
}
void CostModel::RecordSize(const Node* node, int slot, Bytes bytes) {
const int id = Id(node);
if (id < 0) return;
CHECK_LT(id, slot_bytes_.size());
auto perslot = &slot_bytes_[id];
CHECK_LT(slot, perslot->size());
auto v = &(*perslot)[slot];
if (*v >= 0) {
*v += bytes;
} else {
*v = bytes;
}
}
Bytes CostModel::TotalBytes(const Node* node, int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= slot_bytes_.size() ||
slot_bytes_[id].size() <= static_cast<size_t>(slot)) {
return Bytes(0);
}
return slot_bytes_[id][slot];
}
Bytes CostModel::SizeEstimate(const Node* node, int slot) const {
int32_t count = TotalCount(node);
if (count < min_count_) return Bytes(0);
return TotalBytes(node, slot) / std::max(1, TotalCount(node));
}
void CostModel::RecordTime(const Node* node, Microseconds time) {
const int id = Id(node);
if (id < 0) return;
DCHECK(node->IsOp()) << node->DebugString();
Ensure(id, node->num_outputs());
time_[id] += time;
}
Microseconds CostModel::TotalTime(const Node* node) const {
DCHECK(node->IsOp()) << node->DebugString();
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= time_.size() ||
time_[id] < Microseconds(0)) {
return Microseconds(0);
}
return time_[id];
}
Microseconds CostModel::TimeEstimate(const Node* node) const {
int32_t count = TotalCount(node);
if (count <= min_count_) return kMinTimeEstimate;
return std::max(kMinTimeEstimate, TotalTime(node) / std::max(1, count));
}
void CostModel::CheckInitialized(const Graph& graph) const {
for (const Node* n : graph.op_nodes()) {
CHECK(static_cast<size_t>(n->id()) < time_.size() &&
time_[n->id()] >= Microseconds(0))
<< ": no time estimate for " << n->DebugString();
CHECK(static_cast<size_t>(n->id()) < slot_bytes_.size())
<< ": no size estimate for " << n->DebugString();
const auto& perslot = slot_bytes_[n->id()];
for (size_t i = 0; i < perslot.size(); i++) {
CHECK_GE(perslot[i], Bytes(0)) << ": no size estimate for output# " << i
<< " of " << n->DebugString();
}
}
}
void CostModel::RecordMaxMemorySize(const Node* node, int output_slot,
Bytes bytes,
const TensorShapeProto& tensor_shape,
const DataType& dtype) {
const int id = Id(node);
if (id < 0) return;
if (output_slot >= node->num_outputs()) {
LOG(ERROR) << "Unexpected output slot for node " << node->DebugString()
<< ". Got " << output_slot << " but its num_outputs is "
<< node->num_outputs();
return;
}
Ensure(id, node->num_outputs());
auto& current_max = max_mem_usage_[id].output_port_mem[output_slot];
if (bytes.value() < 0) {
bytes = MinTensorMemoryUsage(tensor_shape, dtype);
}
if (bytes.value() > current_max.value()) {
current_max = bytes.value();
max_mem_usage_[id].output_port_shape[output_slot] = tensor_shape;
max_mem_usage_[id].output_port_type[output_slot] = dtype;
}
}
Bytes CostModel::MaxMemorySize(const Node* node, int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size() ||
max_mem_usage_[id].output_port_mem.size() <= static_cast<size_t>(slot)) {
return Bytes(0);
}
return max_mem_usage_[id].output_port_mem[slot];
}
const TensorShapeProto& CostModel::MaxMemoryShape(const Node* node,
int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size() ||
max_mem_usage_[id].output_port_shape.size() <=
static_cast<size_t>(slot)) {
return unknown_shape_;
}
return max_mem_usage_[id].output_port_shape[slot];
}
DataType CostModel::MaxMemoryType(const Node* node, int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size() ||
max_mem_usage_[id].output_port_type.size() <= static_cast<size_t>(slot)) {
return DT_INVALID;
}
return max_mem_usage_[id].output_port_type[slot];
}
Bytes CostModel::TempMemorySize(const Node* node) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size()) {
return Bytes(0);
}
return max_mem_usage_[id].temp_memory_size;
}
Bytes CostModel::PersistentMemorySize(const Node* node) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size()) {
return Bytes(0);
}
return max_mem_usage_[id].persistent_memory_size;
}
void CostModel::RecordMemoryStats(const Node* node,
const MemoryStats& memory_stats) {
const int id = Id(node);
if (id < 0) return;
Ensure(id, node->num_outputs());
max_mem_usage_[id].temp_memory_size = memory_stats.temp_memory_size();
max_mem_usage_[id].persistent_memory_size =
memory_stats.persistent_memory_size();
for (int64_t alloc_id : memory_stats.persistent_tensor_alloc_ids()) {
if (alloc_id > 0) {
persistent_alloc_ids_.insert(alloc_id);
}
}
}
void CostModel::RecordMaxExecutionTime(const Node* node, Microseconds time) {
const int id = Id(node);
if (id < 0) return;
Ensure(id, node->num_outputs());
max_exec_time_[id] = std::max(max_exec_time_[id], time);
}
Microseconds CostModel::MaxExecutionTime(const Node* node) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_exec_time_.size()) {
return Microseconds(0);
}
return max_exec_time_[id];
}
void CostModel::RecordAllocationId(const Node* node, int output_slot,
int64_t alloc_id) {
const int id = Id(node);
if (id < 0) return;
Ensure(id, node->num_outputs());
output_port_alloc_ids_[id][output_slot] = alloc_id;
}
int64_t CostModel::AllocationId(const Node* node, int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= output_port_alloc_ids_.size() ||
output_port_alloc_ids_[id].size() <= static_cast<size_t>(slot)) {
return -1;
}
return output_port_alloc_ids_[id][slot];
}
bool CostModel::IsPersistentTensor(const Node* node, int64_t alloc_id) const {
if (persistent_alloc_ids_.count(alloc_id) > 0) {
return true;
}
return false;
}
Microseconds CostModel::CopyTimeEstimate(Bytes b, double network_latency_millis,
double estimated_gbps) {
int64_t copy_bytes = b.value();
const double bytes_per_usec = estimated_gbps * 1000.0 / 8;
const double min_micros = network_latency_millis * 1000.0;
return Microseconds(
static_cast<int64_t>(copy_bytes / bytes_per_usec + min_micros));
}
Microseconds CostModel::ComputationTimeEstimate(int64_t math_ops) {
return Microseconds(math_ops / 1000);
}
void CostModel::IncrementUpdateTimes() { update_times_++; }
int32 CostModel::GetUpdateTimes() const { return update_times_; }
namespace {
static void AddNodesToCostModel(const Graph& g, CostModel* cost_model) {
for (Node* n : g.nodes()) {
const int num_outputs = n->num_outputs();
cost_model->SetNumOutputs(n, num_outputs);
for (int output = 0; output < num_outputs; output++) {
cost_model->RecordSize(n, output, Bytes(1));
}
}
}
static void AssignSizes(const Graph& g, CostModel* cost_model) {
for (const Edge* e : g.edges()) {
if (e->IsControlEdge()) {
continue;
}
const Node* src = e->src();
Bytes size(1);
cost_model->RecordSize(src, e->src_output(), size);
}
}
static Microseconds TimeEstimateForNode(CostModel* cost_model, Node* n) {
CHECK(n->IsOp());
VLOG(2) << "Node " << n->id() << ": " << n->name()
<< " type_string: " << n->type_string();
if (IsConstant(n) || IsVariable(n)) {
return Microseconds(0);
}
return kDefaultTimeEstimate;
}
static void EstimateComputationCosts(const Graph& g, CostModel* cost_model) {
for (Node* n : g.nodes()) {
if (!n->IsOp()) continue;
cost_model->RecordTime(n, TimeEstimateForNode(cost_model, n));
}
}
}
void CostModel::InitFromGraph(const Graph& g) {
const int num_node_ids = g.num_node_ids();
slot_bytes_.reserve(num_node_ids);
count_.reserve(num_node_ids);
time_.reserve(num_node_ids);
max_mem_usage_.reserve(num_node_ids);
max_exec_time_.reserve(num_node_ids);
output_port_alloc_ids_.reserve(num_node_ids);
AddNodesToCostModel(g, this);
AssignSizes(g, this);
EstimateComputationCosts(g, this);
CheckInitialized(g);
}
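// Serializes this model into `cost_graph`: each node's inputs, per-output
// sizes/types/shapes (aliasing outputs to inputs that share an allocation id
// and zero-sizing persistent tensors), control inputs, and memory/compute
// costs.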
void CostModel::AddToCostGraphDef(const Graph* graph,
CostGraphDef* cost_graph) const {
std::vector<const Edge*> inputs;
std::vector<const Edge*> control_inputs;
int offset = cost_graph->node_size();
for (const Node* n : graph->nodes()) {
CostGraphDef::Node* cnode = cost_graph->add_node();
cnode->set_name(n->name());
cnode->set_device(n->assigned_device_name());
cnode->set_id(GlobalId(n, offset));
inputs.clear();
inputs.resize(n->num_inputs(), nullptr);
control_inputs.clear();
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
control_inputs.push_back(e);
} else {
inputs[e->dst_input()] = e;
}
}
std::sort(control_inputs.begin(), control_inputs.end(),
[this](Edge const* a, Edge const* b) {
return Id(a->src()) < Id(b->src());
});
for (const Edge* e : inputs) {
CostGraphDef::Node::InputInfo* input_info = cnode->add_input_info();
input_info->set_preceding_node(GlobalId(e->src(), offset));
input_info->set_preceding_port(e->src_output());
}
for (int i = 0; i < n->num_outputs(); i++) {
CostGraphDef::Node::OutputInfo* output_info = cnode->add_output_info();
int64_t alloc_id = AllocationId(n, i);
int64_t alias_to_input = -1;
for (const Edge* e : inputs) {
int64_t input_alloc_id = AllocationId(e->src(), e->src_output());
if (input_alloc_id == alloc_id) {
alias_to_input = e->dst_input();
break;
}
}
output_info->set_alias_input_port(alias_to_input);
output_info->set_dtype(MaxMemoryType(n, i));
*output_info->mutable_shape() = MaxMemoryShape(n, i);
if (alias_to_input < 0 && IsPersistentTensor(n, alloc_id)) {
output_info->set_size(0);
} else {
output_info->set_size(MaxMemorySize(n, i).value());
}
}
for (const Edge* e : control_inputs) {
cnode->add_control_input(GlobalId(e->src(), offset));
}
cnode->set_temporary_memory_size(TempMemorySize(n).value());
cnode->set_persistent_memory_size(PersistentMemorySize(n).value());
cnode->set_compute_cost(MaxExecutionTime(n).value());
cnode->set_is_final(n->IsSend());
}
}
void CostModel::WriteSummaryToLog() const {
LOG(INFO) << " min_count_=" << min_count_;
for (size_t i = 0; i < count_.size(); ++i) {
LOG(INFO) << "Node " << i << " count " << count_[i] << " total time "
<< time_[i] << " avg time "
<< (time_[i] / (std::max(1, count_[i])));
}
}
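// Lower bound on the memory needed for a tensor of the given shape and dtype:
// element count (clamping each dimension to at least 1) times the element
// size, or -1 if the rank is unknown.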
Bytes CostModel::MinTensorMemoryUsage(const TensorShapeProto& tensor_shape,
const DataType& dtype) {
if (tensor_shape.unknown_rank()) {
return Bytes(-1);
}
size_t num_coefficients = 1;
for (const TensorShapeProto::Dim& dim : tensor_shape.dim()) {
num_coefficients *= std::max<size_t>(dim.size(), 1);
}
return Bytes(num_coefficients * DataTypeSize(dtype));
}
} | #include "tensorflow/core/graph/costmodel.h"
#include <memory>
#include <string>
#include <unordered_map>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/common_runtime/costmodel_manager.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/step_stats_collector.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
using ::testing::Not;
MATCHER_P(ShapeProtoEquals, other, "") {
if (arg.unknown_rank()) {
return other.unknown_rank();
}
if (arg.dim_size() != other.dim_size()) {
return false;
}
for (int i = 0; i < arg.dim_size(); ++i) {
if (arg.dim(i).size() != other.dim(i).size()) {
return false;
}
}
return true;
}
static void InitGraph(const string& s, Graph* graph) {
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(s, &graph_def)) << s;
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, graph));
}
static void InitModelFromGraph(const Graph& graph, CostModel& cm) {
for (const auto& node : graph.nodes()) {
cm.SetNumOutputs(node, node->num_outputs());
}
}
static std::unique_ptr<Graph> CreateBasicTestGraph() {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }",
graph.get());
return graph;
}
Node* FindNode(const Graph& graph, std::string name) {
for (const auto& node : graph.nodes()) {
if (node->name() == name) {
return node;
}
}
return nullptr;
}
Node* AddNode(Graph& graph, const string& name, const string& node_type,
int num_inputs) {
auto builder = NodeDefBuilder(name, node_type);
for (int i = 0; i < num_inputs; ++i) {
builder = builder.Input(strings::StrCat("node_", i), i, DT_FLOAT);
}
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph.AddNode(node_def, &s);
TF_CHECK_OK(s);
return node;
}
static void GenerateStepStats(Graph* graph, StepStats* step_stats,
const string& device_name) {
DeviceStepStats* device_stepstats = step_stats->add_dev_stats();
device_stepstats->set_device(device_name);
for (const auto& node_def : graph->nodes()) {
NodeExecStats* node_stats = device_stepstats->add_node_stats();
node_stats->set_node_name(node_def->name());
}
}
REGISTER_OP("Input").Output("o: float").SetIsStateful();
TEST(CostModelTest, WorksWithManager) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto graph1 = std::make_unique<Graph>(OpRegistry::Global());
auto graph2 = std::make_unique<Graph>(OpRegistry::Global());
InitGraph(
"node { name: 'A1' op: 'Input'}"
"node { name: 'B1' op: 'Input'}"
"node { name: 'C1' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A1', 'B1'] }"
"node { name: 'D1' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A1', 'B1'] }",
graph1.get());
InitGraph(
"node { name: 'A2' op: 'Input'}"
"node { name: 'B2' op: 'Input'}"
"node { name: 'C2' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A2', 'B2'] }"
"node { name: 'D2' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A2', 'B2'] }",
graph2.get());
StepStats step_stats;
GenerateStepStats(graph1.get(), &step_stats, "DummyDevice1");
GenerateStepStats(graph2.get(), &step_stats, "DummyDevice2");
StepStatsCollector collector(&step_stats);
std::unordered_map<string, const Graph*> device_map;
device_map["DummyDevice1"] = graph1.get();
device_map["DummyDevice2"] = graph2.get();
CostModelManager cost_model_manager;
collector.BuildCostModel(&cost_model_manager, device_map);
CostGraphDef cost_graph_def;
TF_ASSERT_OK(
cost_model_manager.AddToCostGraphDef(graph1.get(), &cost_graph_def));
TF_ASSERT_OK(
cost_model_manager.AddToCostGraphDef(graph2.get(), &cost_graph_def));
ASSERT_EQ(cost_graph_def.node_size(), 12);
absl::flat_hash_map<int32, const CostGraphDef::Node> ids;
for (auto node : cost_graph_def.node()) {
int32_t index = node.id();
auto result = ids.insert({index, node});
EXPECT_TRUE(result.second);
}
}
TEST(CostModelTest, GlobalId) {
auto graph = CreateBasicTestGraph();
CostModel cm_local(false);
CostModel cm_global(true);
constexpr int kOffset = 7;
for (const auto& node : graph->nodes()) {
EXPECT_EQ(cm_local.GlobalId(node, kOffset), node->id() + kOffset);
EXPECT_EQ(cm_global.GlobalId(node, kOffset), node->cost_id());
}
}
TEST(CostModelTest, RecordTime) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
constexpr int kIters = 100;
constexpr int kMicrosPerIter = 1000;
for (int i = 0; i < kIters; ++i) {
for (const auto& node : graph->op_nodes()) {
cm.RecordTime(node, node->id() * Microseconds(kMicrosPerIter));
}
}
for (const auto& node : graph->op_nodes()) {
EXPECT_EQ(cm.TotalTime(node),
Microseconds(node->id() * kIters * kMicrosPerIter));
}
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.TotalTime(E), Microseconds(0));
}
TEST(CostModelTest, RecordCount) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
constexpr int kIters = 100;
constexpr int kCountPerIter = 4;
for (int i = 0; i < kIters; ++i) {
for (const auto& node : graph->op_nodes()) {
cm.RecordCount(node, node->id() * kCountPerIter);
}
}
for (const auto& node : graph->op_nodes()) {
EXPECT_EQ(cm.TotalCount(node), node->id() * kIters * kCountPerIter);
}
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.TotalCount(E), 0);
}
TEST(CostModelTest, RecordSize) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
constexpr int kIters = 100;
constexpr int kBytesPerIter = 4;
for (int i = 0; i < kIters; ++i) {
for (const auto& node : graph->op_nodes()) {
for (int slot = 0; slot < node->num_outputs(); ++slot) {
cm.RecordSize(node, slot, Bytes((node->id() + slot) * kBytesPerIter));
}
}
}
for (const auto& node : graph->op_nodes()) {
for (int slot = 0; slot < node->num_outputs(); ++slot) {
EXPECT_EQ(cm.TotalBytes(node, slot),
Bytes((node->id() + slot) * kIters * kBytesPerIter));
}
}
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.TotalBytes(E, 0), Bytes(0));
}
TEST(CostModelTest, SizeEstimate) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
constexpr int kBytesPerCount = 31;
constexpr int kCount = 17;
cm.RecordCount(C, kCount);
cm.RecordSize(C, 0, Bytes(kCount * kBytesPerCount));
EXPECT_EQ(cm.SizeEstimate(C, 0), Bytes(kBytesPerCount));
}
TEST(CostModelTest, TimeEstimate) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
constexpr int kMicrosPerCount = 31;
constexpr int kCount = 17;
cm.RecordCount(C, kCount);
cm.RecordTime(C, Microseconds(kCount * kMicrosPerCount));
EXPECT_EQ(cm.TimeEstimate(C), Microseconds(kMicrosPerCount));
}
TensorShapeProto CreateTensorShapeProto(absl::Span<const int64_t> dims) {
TensorShapeProto shape;
for (int i = 0; i < dims.size(); ++i) {
shape.add_dim()->set_size(dims[i]);
}
return shape;
}
int64_t Count(const TensorShapeProto& shape) {
int64_t count = 1;
for (int i = 0; i < shape.dim_size(); ++i) {
count *= shape.dim(i).size();
}
return count;
}
TEST(CostModelTest, RecordMaxMemorySize) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
Node* C = FindNode(*graph, "C");
InitModelFromGraph(*graph, cm);
EXPECT_EQ(cm.MaxMemorySize(C, 0), Bytes(-1));
{
const TensorShapeProto shape = CreateTensorShapeProto({2, 5, 10});
const DataType dtype = DataType::DT_FLOAT;
const Bytes bytes = Bytes(Count(shape) * sizeof(float));
cm.RecordMaxMemorySize(C, 0, bytes, shape, dtype);
EXPECT_EQ(cm.MaxMemorySize(C, 0), bytes);
EXPECT_EQ(cm.MaxMemoryType(C, 0), dtype);
EXPECT_THAT(cm.MaxMemoryShape(C, 0), ShapeProtoEquals(shape));
}
{
const TensorShapeProto shape = CreateTensorShapeProto({3, 6, 11});
const DataType dtype = DataType::DT_DOUBLE;
const Bytes bytes = Bytes(Count(shape) * sizeof(double));
cm.RecordMaxMemorySize(C, 0, bytes, shape, dtype);
EXPECT_EQ(cm.MaxMemorySize(C, 0), bytes);
EXPECT_EQ(cm.MaxMemoryType(C, 0), dtype);
EXPECT_THAT(cm.MaxMemoryShape(C, 0), ShapeProtoEquals(shape));
}
{
const TensorShapeProto shape = CreateTensorShapeProto({1, 1, 1});
const DataType dtype = DataType::DT_BFLOAT16;
const Bytes bytes = Bytes(Count(shape) * sizeof(double));
cm.RecordMaxMemorySize(C, 0, bytes, shape, dtype);
EXPECT_GT(cm.MaxMemorySize(C, 0), bytes);
EXPECT_NE(cm.MaxMemoryType(C, 0), dtype);
EXPECT_THAT(cm.MaxMemoryShape(C, 0), Not(ShapeProtoEquals(shape)));
}
{
const TensorShapeProto shape = CreateTensorShapeProto({100, 100, 100});
const DataType dtype = DataType::DT_BFLOAT16;
cm.RecordMaxMemorySize(C, 0, Bytes(-1), shape, dtype);
EXPECT_EQ(cm.MaxMemorySize(C, 0), Bytes(Count(shape) * sizeof(bfloat16)));
EXPECT_EQ(cm.MaxMemoryType(C, 0), dtype);
EXPECT_THAT(cm.MaxMemoryShape(C, 0), ShapeProtoEquals(shape));
}
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.MaxMemorySize(E, 0), Bytes(0));
EXPECT_THAT(cm.MaxMemoryType(E, 0), DataType::DT_INVALID);
TensorShapeProto unknown;
unknown.set_unknown_rank(true);
EXPECT_THAT(cm.MaxMemoryShape(E, 0), ShapeProtoEquals(unknown));
}
TEST(CostModelTest, RecordMaxExecutionTime) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
EXPECT_EQ(cm.MaxExecutionTime(C), Microseconds(0));
cm.RecordMaxExecutionTime(C, Microseconds(13));
EXPECT_EQ(cm.MaxExecutionTime(C), Microseconds(13));
cm.RecordMaxExecutionTime(C, Microseconds(27));
EXPECT_EQ(cm.MaxExecutionTime(C), Microseconds(27));
cm.RecordMaxExecutionTime(C, Microseconds(9));
EXPECT_EQ(cm.MaxExecutionTime(C), Microseconds(27));
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.MaxExecutionTime(E), Microseconds(0));
}
TEST(CostModelTest, RecordMemoryStats) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
MemoryStats stats;
stats.set_temp_memory_size(256);
stats.set_persistent_memory_size(16);
stats.add_persistent_tensor_alloc_ids(1);
stats.add_persistent_tensor_alloc_ids(3);
stats.add_persistent_tensor_alloc_ids(5);
stats.add_persistent_tensor_alloc_ids(5);
cm.RecordMemoryStats(C, stats);
EXPECT_EQ(cm.TempMemorySize(C), stats.temp_memory_size());
EXPECT_EQ(cm.PersistentMemorySize(C), stats.persistent_memory_size());
EXPECT_TRUE(cm.IsPersistentTensor(C, 1));
EXPECT_TRUE(cm.IsPersistentTensor(C, 3));
EXPECT_TRUE(cm.IsPersistentTensor(C, 5));
EXPECT_FALSE(cm.IsPersistentTensor(C, 31));
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.TempMemorySize(E), Bytes(0));
EXPECT_EQ(cm.PersistentMemorySize(E), Bytes(0));
}
TEST(CostModelTest, RecordAllocationId) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
cm.RecordAllocationId(C, 0, 13);
EXPECT_EQ(cm.AllocationId(C, 0), 13);
EXPECT_EQ(cm.AllocationId(C, 7), -1);
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.AllocationId(E, 0), -1);
}
TEST(CostModelTest, CopyTimeEstimate) {
int64_t bytes = 32568;
double latency_ms = 10.2;
double gbps = 2.2;
double bytes_per_usec = gbps * 1000 / 8;
double cost_usecs = (bytes / bytes_per_usec + latency_ms * 1000);
EXPECT_EQ(CostModel::CopyTimeEstimate(Bytes(bytes), latency_ms, gbps),
Microseconds(static_cast<uint64_t>(cost_usecs)));
}
TEST(CostModelTest, ComputationTimeEstimate) {
constexpr int64_t kNumMathOps = 32150;
EXPECT_EQ(CostModel::ComputationTimeEstimate(kNumMathOps),
Microseconds(kNumMathOps / 1000));
}
TEST(CostModel, UpdateTimes) {
CostModel cm(false);
EXPECT_EQ(cm.GetUpdateTimes(), 0);
constexpr int kNumUpdates = 111;
for (int i = 0; i < kNumUpdates; ++i) {
cm.IncrementUpdateTimes();
}
EXPECT_EQ(cm.GetUpdateTimes(), kNumUpdates);
}
TEST(CostModel, SuppressInfrequent) {
CostModel cm(false);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Node* A = AddNode(*graph, "A", "Mul", 2);
Node* B = AddNode(*graph, "B", "Mul", 2);
Node* C = AddNode(*graph, "B", "Mul", 2);
InitModelFromGraph(*graph, cm);
cm.RecordCount(A, 1000);
cm.RecordSize(A, 0, Bytes(8 * 1000));
cm.RecordTime(A, Microseconds(8 * 1000));
cm.RecordCount(B, 2000);
cm.RecordSize(B, 0, Bytes(2000 * 10));
cm.RecordTime(B, Microseconds(2000 * 10));
cm.RecordCount(C, 17);
cm.RecordSize(C, 0, Bytes(32 * 17));
cm.RecordTime(C, Microseconds(32 * 17));
EXPECT_EQ(cm.SizeEstimate(A, 0), Bytes(8));
EXPECT_EQ(cm.TimeEstimate(A), Microseconds(8));
EXPECT_EQ(cm.SizeEstimate(B, 0), Bytes(10));
EXPECT_EQ(cm.TimeEstimate(B), Microseconds(10));
EXPECT_EQ(cm.SizeEstimate(C, 0), Bytes(32));
EXPECT_EQ(cm.TimeEstimate(C), Microseconds(32));
cm.SuppressInfrequent();
EXPECT_EQ(cm.SizeEstimate(A, 0), Bytes(8));
EXPECT_EQ(cm.TimeEstimate(A), Microseconds(8));
EXPECT_EQ(cm.SizeEstimate(B, 0), Bytes(10));
EXPECT_EQ(cm.TimeEstimate(B), Microseconds(10));
EXPECT_EQ(cm.SizeEstimate(C, 0), Bytes(0));
EXPECT_EQ(cm.TimeEstimate(C), Microseconds(1));
}
TEST(CostModelTest, MergeFromLocal) {
CostModel cm_global(true);
CostModel cm_local(false);
auto graph = CreateBasicTestGraph();
InitModelFromGraph(*graph, cm_global);
Node* C = FindNode(*graph, "C");
Node* D = FindNode(*graph, "D");
cm_global.RecordCount(C, 23);
cm_global.RecordSize(C, 0, Bytes(23));
cm_global.RecordTime(C, Microseconds(123));
cm_global.RecordCount(D, 17);
cm_global.RecordSize(D, 0, Bytes(17));
cm_global.RecordTime(D, Microseconds(117));
Node* E = AddNode(*graph, "E", "Mul", 2);
graph->AddEdge(C, 0, E, 0);
graph->AddEdge(D, 0, E, 1);
Node* F = AddNode(*graph, "F", "Mul", 2);
graph->AddEdge(E, 0, F, 0);
graph->AddEdge(D, 0, F, 1);
InitModelFromGraph(*graph, cm_local);
cm_local.RecordCount(E, 37);
cm_local.RecordSize(E, 0, Bytes(37));
cm_local.RecordTime(E, Microseconds(137));
cm_local.RecordCount(F, 41);
cm_local.RecordSize(F, 0, Bytes(41));
cm_local.RecordTime(F, Microseconds(141));
cm_local.RecordCount(C, 1);
cm_local.RecordSize(C, 0, Bytes(1));
cm_local.RecordTime(C, Microseconds(100));
cm_global.MergeFromLocal(*graph, cm_local);
EXPECT_EQ(cm_global.TotalCount(E), cm_local.TotalCount(E));
EXPECT_EQ(cm_global.TotalBytes(E, 0), cm_local.TotalBytes(E, 0));
EXPECT_EQ(cm_global.TotalTime(E), cm_local.TotalTime(E));
EXPECT_EQ(cm_global.TotalCount(F), cm_local.TotalCount(F));
EXPECT_EQ(cm_global.TotalBytes(F, 0), cm_local.TotalBytes(F, 0));
EXPECT_EQ(cm_global.TotalTime(F), cm_local.TotalTime(F));
EXPECT_EQ(cm_global.TotalCount(C), Microseconds(24));
EXPECT_EQ(cm_global.TotalBytes(C, 0), Bytes(24));
EXPECT_EQ(cm_global.TotalTime(C), Microseconds(223));
}
TEST(CostModelTest, MergeFromGlobal) {
CostModel cm1(true);
CostModel cm2(true);
auto graph = CreateBasicTestGraph();
InitModelFromGraph(*graph, cm1);
Node* C = FindNode(*graph, "C");
Node* D = FindNode(*graph, "D");
cm1.RecordCount(C, 23);
cm1.RecordSize(C, 0, Bytes(23));
cm1.RecordTime(C, Microseconds(123));
cm1.RecordCount(D, 17);
cm1.RecordSize(D, 0, Bytes(17));
cm1.RecordTime(D, Microseconds(117));
Node* E = AddNode(*graph, "E", "Mul", 2);
graph->AddEdge(C, 0, E, 0);
graph->AddEdge(D, 0, E, 1);
Node* F = AddNode(*graph, "F", "Mul", 2);
graph->AddEdge(E, 0, F, 0);
graph->AddEdge(D, 0, F, 1);
InitModelFromGraph(*graph, cm2);
cm2.RecordCount(E, 37);
cm2.RecordSize(E, 0, Bytes(37));
cm2.RecordTime(E, Microseconds(137));
cm2.RecordCount(F, 41);
cm2.RecordSize(F, 0, Bytes(41));
cm2.RecordTime(F, Microseconds(141));
cm2.RecordCount(C, 1);
cm2.RecordSize(C, 0, Bytes(1));
cm2.RecordTime(C, Microseconds(100));
cm1.MergeFromGlobal(cm2);
EXPECT_EQ(cm1.TotalCount(E), cm2.TotalCount(E));
EXPECT_EQ(cm1.TotalBytes(E, 0), cm2.TotalBytes(E, 0));
EXPECT_EQ(cm1.TotalTime(E), cm2.TotalTime(E));
EXPECT_EQ(cm1.TotalCount(F), cm2.TotalCount(F));
EXPECT_EQ(cm1.TotalBytes(F, 0), cm2.TotalBytes(F, 0));
EXPECT_EQ(cm1.TotalTime(F), cm2.TotalTime(F));
EXPECT_EQ(cm1.TotalCount(C), Microseconds(24));
EXPECT_EQ(cm1.TotalBytes(C, 0), Bytes(24));
EXPECT_EQ(cm1.TotalTime(C), Microseconds(223));
}
NodeExecStats CreateNodeExecStats(const Node* node, int64_t time,
int64_t bytes) {
NodeExecStats stats;
stats.set_node_name(node->name());
stats.set_op_start_rel_micros(10);
stats.set_op_end_rel_micros(10 + time);
for (int i = 0; i < node->num_outputs(); ++i) {
NodeOutput* no = stats.add_output();
no->set_slot(i);
no->mutable_tensor_description()
->mutable_allocation_description()
->set_requested_bytes(bytes);
}
return stats;
}
TEST(CostModelTest, MergeFromStats) {
CostModel cm(true);
auto graph = CreateBasicTestGraph();
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
Node* D = FindNode(*graph, "D");
cm.RecordCount(C, 23);
cm.RecordTime(C, Microseconds(123));
cm.RecordCount(D, 17);
cm.RecordTime(D, Microseconds(117));
Node* E = AddNode(*graph, "E", "Mul", 2);
graph->AddEdge(C, 0, E, 0);
graph->AddEdge(D, 0, E, 1);
Node* F = AddNode(*graph, "F", "Mul", 2);
graph->AddEdge(E, 0, F, 0);
graph->AddEdge(D, 0, F, 1);
StepStats stats;
DeviceStepStats* dstats = stats.add_dev_stats();
*(dstats->add_node_stats()) = CreateNodeExecStats(C, 10, 10);
*(dstats->add_node_stats()) = CreateNodeExecStats(D, 10, 10);
*(dstats->add_node_stats()) = CreateNodeExecStats(E, 20, 20);
*(dstats->add_node_stats()) = CreateNodeExecStats(E, 20, 20);
*(dstats->add_node_stats()) = CreateNodeExecStats(F, 30, 30);
*(dstats->add_node_stats()) = CreateNodeExecStats(F, 30, 30);
NodeNameToCostIdMap id_map;
for (const auto& node : graph->nodes()) {
id_map.emplace(node->name(), node->cost_id());
}
cm.MergeFromStats(id_map, stats);
EXPECT_EQ(cm.TotalCount(C), 24);
EXPECT_EQ(cm.TotalTime(C), Microseconds(133));
EXPECT_EQ(cm.TotalBytes(C, 0), Bytes(10));
EXPECT_EQ(cm.TotalCount(D), 18);
EXPECT_EQ(cm.TotalTime(D), Microseconds(127));
EXPECT_EQ(cm.TotalBytes(D, 0), Bytes(10));
EXPECT_EQ(cm.TotalCount(E), 2);
EXPECT_EQ(cm.TotalTime(E), Microseconds(40));
EXPECT_EQ(cm.TotalBytes(E, 0), Bytes(40));
EXPECT_EQ(cm.TotalCount(F), 2);
EXPECT_EQ(cm.TotalTime(F), Microseconds(60));
EXPECT_EQ(cm.TotalBytes(F, 0), Bytes(60));
}
}
} |
1,436 | cpp | tensorflow/tensorflow | algorithm | third_party/xla/xla/service/memory_space_assignment/algorithm.cc | tensorflow/core/graph/algorithm_test.cc | #ifndef XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_ALGORITHM_H_
#define XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_ALGORITHM_H_
#include <algorithm>
#include <cstdint>
#include <list>
#include <map>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#if defined(__GNUC__) || defined(__clang__)
#include "absl/container/btree_map.h"
#endif
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/call_graph.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/buffer_interval_comparator.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/service/memory_space_assignment/slice.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace memory_space_assignment {
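// One HloValue as seen by the assignment algorithm: its defining position,
// size, and uses, plus the AllocationSequence the algorithm produces for it.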
class AllocationValue {
public:
struct Use {
HloUse hlo_use;
int64_t time;
std::vector<HloPosition> aliases;
bool operator==(const Use& other) const {
return hlo_use == other.hlo_use && time == other.time &&
aliases == other.aliases;
}
template <typename H>
friend H AbslHashValue(H h, const Use& s) {
return H::combine(std::move(h), s.hlo_use, s.time, s.aliases);
}
};
AllocationValue(const HloValue* value, const HloPosition& position,
int64_t size)
: value_(value),
defining_position_(position),
size_(size),
requires_contiguous_allocation_(false) {}
const HloPosition& defining_position() const { return defining_position_; }
const HloInstruction* defining_instruction() const {
return defining_position().instruction;
}
int64_t size() const { return size_; }
const std::vector<Use>& uses() const { return uses_; }
std::vector<Use>& uses() { return uses_; }
const HloValue* value() const { return value_; }
const HloComputation* computation() const {
return defining_instruction()->parent();
}
AllocationSequence* mutable_allocation_sequence() {
return &allocation_sequence_;
}
const AllocationSequence* allocation_sequence() const {
return &allocation_sequence_;
}
bool requires_contiguous_allocation() const {
return requires_contiguous_allocation_;
}
void set_requires_contiguous_allocation(bool requires_contiguous_allocation) {
requires_contiguous_allocation_ = requires_contiguous_allocation;
}
void AddUse(const HloUse& use, int64_t use_time) {
uses_.push_back({use, use_time, {}});
}
std::string ToString() const;
std::string ToShortString() const;
private:
const HloValue* value_;
HloPosition defining_position_;
int64_t size_;
bool requires_contiguous_allocation_;
std::vector<Use> uses_;
AllocationSequence allocation_sequence_;
};
struct AsynchronousCopy {
int64_t exclusive_start_time;
int64_t end_time;
float resource;
MemorySpace destination;
int64_t id;
std::tuple<int64_t, int64_t, float, MemorySpace, int64_t> AsTuple() const {
return std::make_tuple(exclusive_start_time, end_time, resource,
destination, id);
}
};
bool operator<(const AsynchronousCopy& a, const AsynchronousCopy& b);
bool operator==(const AsynchronousCopy& a, const AsynchronousCopy& b);
bool operator!=(const AsynchronousCopy& a, const AsynchronousCopy& b);
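// Tracks in-flight asynchronous copies by their (exclusive start, end)
// windows; ViolatesOrdering flags a new copy whose window would nest with an
// existing one, i.e. a copy that starts later but would finish earlier (or
// vice versa), breaking the copy engine's FIFO completion order.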
class AsynchronousCopyOrdering {
public:
AsynchronousCopyOrdering() = default;
void AddCopy(const AsynchronousCopy& copy);
void RemoveCopy(const AsynchronousCopy& copy);
bool ViolatesOrdering(int64_t exclusive_start_time, int64_t end_time) const;
private:
struct Interval {
int64_t exclusive_start_time;
int64_t end_time;
bool operator<(const Interval& other) const {
return (exclusive_start_time < other.exclusive_start_time &&
end_time <= other.end_time) ||
(exclusive_start_time <= other.exclusive_start_time &&
end_time < other.end_time);
}
};
std::map<Interval, std::set<AsynchronousCopy>> ranges_;
};
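// Bookkeeping for the copy bandwidth ("resource") available at each schedule
// position. Adding a copy consumes resource across its time window; when a
// position is oversubscribed the shortfall is pushed later via delay_, and
// HasEnoughResource checks whether a prospective copy still fits.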
class AsynchronousCopyResource {
public:
struct ResourceSpec {
int64_t exclusive_start_time;
int64_t end_time;
float resource;
};
AsynchronousCopyResource() = default;
explicit AsynchronousCopyResource(absl::Span<const float> initial_resources)
: initial_resources_(initial_resources.begin(), initial_resources.end()),
delay_(initial_resources.size(), 0) {}
void AddCopy(const AsynchronousCopy& copy);
void RemoveCopy(const AsynchronousCopy& copy);
bool HasEnoughResource(int64_t exclusive_start_time, int64_t end_time,
float resource);
bool HasEnoughResourceMultiCheck(const std::vector<ResourceSpec>& specs);
std::vector<float> GetCurrentResources() const {
std::vector<float> current_resources(initial_resources_.begin(),
initial_resources_.end());
for (int i = 0; i < current_resources.size(); ++i) {
current_resources[i] -= std::min(current_resources[i], delay_[i]);
}
return current_resources;
}
std::string Dump(int64_t start_time, int64_t end_time,
MemorySpace memory_space_filter) const;
private:
bool ConsumeResource(
int64_t exclusive_start_time, int64_t end_time, float resource,
absl::flat_hash_map<int64_t, float>* delay_change_map = nullptr,
float resource_to_free = 0.0);
void RemoveCopy(std::list<AsynchronousCopy>::iterator& copy_it);
std::list<AsynchronousCopy> async_copies_;
#if defined(__GNUC__) || defined(__clang__)
absl::btree_map<int64_t, std::list<AsynchronousCopy>::iterator>
async_copy_time_map_;
#else
std::map<int64_t, std::list<AsynchronousCopy>::iterator> async_copy_time_map_;
#endif
std::vector<float> initial_resources_;
std::vector<float> delay_;
};
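// The core memory space assignment pass: a global decreasing-size best-fit
// heap that decides, per HloValue, whether (and when) to place buffers in the
// alternate memory space and emits the chosen AllocationSequence.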
class MsaAlgorithm : public GlobalDecreasingSizeBestFitHeap<HloValue> {
public:
using HloPositionOrUse = std::variant<HloPosition, HloUse>;
MsaAlgorithm(AllocationSequence* allocations, const Options& options,
const HloAliasAnalysis& alias_analysis,
const HloLiveRange& hlo_live_range);
void AllocateCrossProgramPrefetchBuffer(
HloModule* module, const MsaBufferInterval& prefetch_candidate);
absl::StatusOr<HeapSimulator::Result<HloValue>> Finish() override;
protected:
std::vector<const MsaBufferInterval*> GetSortedColocatedIntervals(
const MsaBufferInterval& interval) const;
void CreateAllocationValues(
const MsaBufferInterval& buffer_interval,
std::vector<AllocationValue>& allocation_values) const;
virtual void CreateAllocationValuesFromColocatedIntervals(
absl::Span<const MsaBufferInterval* const> colocated_intervals,
std::vector<AllocationValue>& allocation_values);
void FindAliases(std::vector<AllocationValue>* allocation_values) const;
AllocationSequence* allocations() { return allocations_; }
const Options& options() const { return options_; }
const HloAliasAnalysis& alias_analysis() { return alias_analysis_; }
const HloLiveRange& hlo_live_range() { return hlo_live_range_; }
private:
struct RepackAllocationBlock : AllocationBlock {
Allocation* allocation;
};
struct AliasedOffset {
int64_t offset;
absl::flat_hash_set<const Allocation*> allocations;
};
struct AllocationRequest {
int64_t inclusive_start_time;
int64_t end_time;
int64_t latest_prefetch_time;
int64_t size;
bool prefer_no_copy_alternate_mem_allocation;
bool allow_no_copy_alternate_mem_allocation;
bool require_no_copy_alternate_mem_allocation;
bool allow_prefetch;
std::optional<int64_t> earliest_prefetch_time;
std::optional<int64_t> preferred_prefetch_time;
AliasedOffset* preferred_offset;
const AllocationValue::Use* use;
AllocationValue* allocation_value;
absl::Span<const int64_t> all_use_times;
};
struct RequiredMemoryAssignment {
MemorySpace memory_space;
int64_t time;
AliasedOffset* offset;
bool equals_ignoring_time(const RequiredMemoryAssignment& other) const {
return memory_space == other.memory_space && offset == other.offset;
}
bool operator==(const RequiredMemoryAssignment& other) const {
return memory_space == other.memory_space && time == other.time &&
offset == other.offset;
}
bool operator!=(const RequiredMemoryAssignment& other) const {
return !(*this == other);
}
};
struct LoopOptimizedAllocationInfo {
int64_t use_index;
int64_t loop_size;
const Allocation* loop_optimized_allocation;
};
struct PrefetchContext {
struct WorkingIntervals {
MsaBufferInterval full;
std::unique_ptr<SlicedBufferInterval> sliced;
};
struct SlicedSolution {
std::vector<SliceDecision> slice_decisions_sorted_by_start_time;
std::vector<std::pair<MsaBufferInterval, Chunk>>
slices_for_pending_chunks;
std::string prefetch_picker_debug_string;
};
struct UnslicedSolution {
Chunk chunk_candidate;
float prefetch_resource;
std::string prefetch_picker_debug_string;
};
WorkingIntervals& GetMutableWorkingIntervals(bool for_sliced_solution) {
if (for_sliced_solution) {
return sliced_solution_intervals;
}
return unsliced_solution_intervals;
}
const WorkingIntervals& GetWorkingIntervals(
bool for_sliced_solution) const {
if (for_sliced_solution) {
return sliced_solution_intervals;
}
return unsliced_solution_intervals;
}
const AllocationRequest* request;
Allocation* prev_allocation_in_default_mem;
int64_t exclusive_prefetch_start_time = -1;
int64_t prefetch_end_time = -1;
const Shape* full_shape;
int64_t extra_async_copy_limit = 0;
std::optional<int64_t> exclusive_out_of_mem_start = std::nullopt;
std::optional<SliceProposalCollection> slice_proposal_collection =
std::nullopt;
WorkingIntervals sliced_solution_intervals;
std::optional<SlicedSolution> sliced_solution;
WorkingIntervals unsliced_solution_intervals;
std::optional<UnslicedSolution> unsliced_solution;
};
enum class Result {
kSuccess = 0,
kFailOutOfMemory = 1,
kFailPrevAllocationNotInAlternateMem = 2,
kFailLiveRangeTooLong = 4,
kFailLiveRangeTooShort = 8,
kFailOutOfAsyncCopies = 16,
kFailViolatesAsyncCopyResource = 32,
kFailRequiresUncommit = 64,
kAllSlicesHaveTheSameStartTime = 128,
kFailConflictingPreferredOffsets = 256
};
static bool result_is(Result result, Result failure) {
return static_cast<int>(result) & static_cast<int>(failure);
}
static Result result_mark(Result failure, Result& result) {
result = static_cast<Result>(static_cast<int>(result) |
static_cast<int>(failure));
return result;
}
static bool result_requires_uncommit(Result result) {
return result_is(result, Result::kFailRequiresUncommit);
}
static bool result_failed_because_of_async_copy(Result result) {
return result_is(result, Result::kFailOutOfAsyncCopies) ||
result_is(result, Result::kFailViolatesAsyncCopyResource);
}
absl::Status OptimizeMemoryBoundLoop(int loop_start_idx, int loop_end_idx,
int loop_size);
void IdentifyAndOptimizeMemoryBoundLoops();
void AllocateReservedScopedAllocations();
AliasedOffset* GetAliasedOffset(const Allocation& allocation);
void CreateOrAddToAliasedOffset(const Allocation& allocation,
AliasedOffset* aliased_offset);
static Allocation* GetLiveAllocationAt(const AllocationSequence& allocations,
int64_t time);
bool IsUseAllowedInAlternateMemory(const AllocationValue& value,
const HloUse& use) const;
AliasedOffset* UpdatePreferredOffsetForUse(
const AllocationValue::Use& use, AliasedOffset* preferred_offset) const;
void UpdateAllocationRequirementForUseAliases(
const AllocationValue& allocation_value, const AllocationValue::Use& use,
int64_t use_time);
void MaybeCreateMirroredParentAllocationForWhileUse(
const AllocationValue& allocation_value, const AllocationValue::Use& use,
int64_t use_time, absl::Span<AllocationValue> allocation_values,
absl::flat_hash_map<const HloComputation*, AliasedOffset*>&
preferred_offset_for_computation);
AllocationRequest CreateAllocationRequest(
AllocationValue& allocation_value, const AllocationValue::Use& use,
const AllocationValue::Use* previous_use, AliasedOffset* preferred_offset,
int64_t definition_time, bool require_no_copy_alternate_mem_allocation,
const std::vector<int64_t>& all_use_times);
absl::StatusOr<Result> AllocateAllocationValues(
absl::Span<AllocationValue> allocation_values); | #include "tensorflow/core/graph/algorithm.h"
#include <algorithm>
#include <string>
#include <unordered_set>
#include <vector>

#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/graph/benchmark_testlib.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
REGISTER_OP("TestParams").Output("o: float");
REGISTER_OP("TestInput").Output("a: float").Output("b: float");
REGISTER_OP("TestMul").Input("a: float").Input("b: float").Output("o: float");
REGISTER_OP("TestUnary").Input("a: float").Output("o: float");
REGISTER_OP("TestBinary")
.Input("a: float")
.Input("b: float")
.Output("o: float");
bool ExpectBefore(const std::vector<std::pair<string, string>>& ordered_pairs,
const std::vector<Node*>& inputs, string* error) {
for (const std::pair<string, string>& pair : ordered_pairs) {
const string& before_node = pair.first;
const string& after_node = pair.second;
bool seen_before = false;
bool seen_both = false;
for (const Node* node : inputs) {
if (!seen_before && after_node == node->name()) {
*error = strings::StrCat("Saw ", after_node, " before ", before_node);
return false;
}
if (before_node == node->name()) {
seen_before = true;
} else if (after_node == node->name()) {
seen_both = seen_before;
break;
}
}
if (!seen_both) {
*error = strings::StrCat("didn't see either ", before_node, " or ",
after_node);
return false;
}
}
return true;
}
TEST(AlgorithmTest, ReversePostOrder) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
Node* w1 = SourceOp("TestParams", b.opts().WithName("W1"));
Node* w2 = SourceOp("TestParams", b.opts().WithName("W2"));
Node* input =
SourceOp("TestInput", b.opts().WithName("input").WithControlInput(w1));
Node* t1 = BinaryOp("TestMul", w1, {input, 1}, b.opts().WithName("t1"));
BinaryOp("TestMul", w1, {input, 1},
b.opts().WithName("t2").WithControlInput(t1));
BinaryOp("TestMul", w2, {input, 1}, b.opts().WithName("t3"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
GetReversePostOrder(g, &order);
std::vector<std::pair<string, string>> reverse_orders = {
{"W1", "input"}, {"W1", "t1"}, {"W1", "t2"}, {"W1", "t3"},
{"input", "t1"}, {"input", "t3"}, {"t1", "t2"}, {"W2", "t3"}};
string error;
EXPECT_TRUE(ExpectBefore(reverse_orders, order, &error)) << error;
reverse_orders = {{"input", "W1"}};
EXPECT_FALSE(ExpectBefore(reverse_orders, order, &error));
GetPostOrder(g, &order);
std::vector<std::pair<string, string>> orders = {
{"input", "W1"}, {"t1", "W1"}, {"t2", "W1"}, {"t3", "W1"},
{"t1", "input"}, {"t3", "input"}, {"t2", "t1"}, {"t3", "W2"}};
EXPECT_TRUE(ExpectBefore(orders, order, &error)) << error;
orders = {{"W1", "t3"}};
EXPECT_FALSE(ExpectBefore(orders, order, &error));
}
TEST(AlgorithmTest, ReversePostOrderStable) {
int64_t run_count = 100;
using namespace ::tensorflow::ops;
for (int64_t i = 0; i < run_count; ++i) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
string error;
Node* w1 = SourceOp("TestParams", b.opts().WithName("W1"));
Node* input =
SourceOp("TestInput", b.opts().WithName("input").WithControlInput(w1));
BinaryOp("TestMul", w1, {input, 1}, b.opts().WithName("t2"));
for (int64_t j = 0; j < i; ++j) {
BinaryOp("TestMul", w1, {input, 1},
b.opts().WithName(strings::StrCat("internal", j)));
}
BinaryOp("TestMul", w1, {input, 1}, b.opts().WithName("t3"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
GetReversePostOrder(g, &order, NodeComparatorName());
EXPECT_TRUE(ExpectBefore({{"t2", "t3"}}, order, &error));
}
}
TEST(AlgorithmTest, PostOrderWithEdgeFilter) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* n0 = ops::SourceOp("TestParams", b.opts().WithName("n0"));
Node* n1 = ops::UnaryOp("TestUnary", n0, b.opts().WithName("n1"));
Node* n2 = ops::UnaryOp("TestUnary", n1, b.opts().WithName("n2"));
Node* n3 = ops::BinaryOp("TestBinary", n2, n0, b.opts().WithName("n3"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
g.AddEdge(g.FindNodeId(n3->id()), 0, g.FindNodeId(n1->id()), 1);
std::vector<Node*> post_order;
auto edge_filter = [&](const Edge& e) {
return !(e.src()->id() == n3->id() && e.dst()->id() == n1->id());
};
std::vector<Node*> expected_post_order = {
g.sink_node(), g.FindNodeId(n3->id()), g.FindNodeId(n2->id()),
g.FindNodeId(n1->id()), g.FindNodeId(n0->id()), g.source_node()};
std::vector<Node*> expected_reverse_post_order = expected_post_order;
std::reverse(expected_reverse_post_order.begin(),
expected_reverse_post_order.end());
GetPostOrder(g, &post_order, {},
edge_filter);
ASSERT_EQ(expected_post_order.size(), post_order.size());
for (int i = 0; i < post_order.size(); i++) {
CHECK_EQ(post_order[i], expected_post_order[i])
<< post_order[i]->name() << " vs. " << expected_post_order[i]->name();
}
std::vector<Node*> reverse_post_order;
GetReversePostOrder(g, &reverse_post_order, {},
edge_filter);
ASSERT_EQ(expected_reverse_post_order.size(), reverse_post_order.size());
for (int i = 0; i < reverse_post_order.size(); i++) {
CHECK_EQ(reverse_post_order[i], expected_reverse_post_order[i])
<< reverse_post_order[i]->name() << " vs. "
<< expected_reverse_post_order[i]->name();
}
}
void BM_PruneForReverseReachability(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
for (auto s : state) {
state.PauseTiming();
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
std::unordered_set<const Node*> visited;
visited.insert(graph.FindNodeId(graph.num_nodes() - 1));
state.ResumeTiming();
PruneForReverseReachability(&graph, std::move(visited));
}
}
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(10, 2);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 6, 2);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 9, 2);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 12, 2);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 15, 2);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(10, 4);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 6, 4);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 9, 4);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 12, 4);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 15, 4);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(10, 8);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 6, 8);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 9, 8);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 12, 8);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 15, 8);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(10, 16);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 6, 16);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 9, 16);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 12, 16);
BENCHMARK(BM_PruneForReverseReachability)->ArgPair(1 << 15, 16);
}
} |
1,437 | cpp | tensorflow/tensorflow | optimizer_cse | tensorflow/core/graph/optimizer_cse.cc | tensorflow/core/graph/optimizer_cse_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_OPTIMIZER_CSE_H_
#define TENSORFLOW_CORE_GRAPH_OPTIMIZER_CSE_H_
#include <sys/types.h>
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
extern bool OptimizeCSE(Graph* g,
const std::function<bool(const Node*)>& consider_fn);
}
#endif
#include "tensorflow/core/graph/optimizer_cse.h"
#include <algorithm>
#include <iostream>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
class OptimizerCSE {
public:
explicit OptimizerCSE(Graph* g) : g_(g) {}
bool Optimize(const std::function<bool(const Node*)>& consider_fn);
private:
static size_t NodeHash(const Node* n);
static bool Equivalent(const Node* a, const Node* b,
AttrSlice::Scratch* scratch);
Graph* g_;
};
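// Gathers a node's control inputs (sorted by pointer) and data inputs keyed by
// input slot; commutative ops additionally get their data inputs sorted so
// operand order does not affect hashing or equivalence checks.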
static void FillInputs(
const Node* n, absl::InlinedVector<const Node*, 4UL>* control_edges,
absl::InlinedVector<std::pair<const Node*, int>, 4UL>* in) {
DCHECK_EQ(in->size(), n->num_inputs());
control_edges->clear();
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
control_edges->push_back(e->src());
} else {
(*in)[e->dst_input()] = std::make_pair(e->src(), e->src_output());
}
}
std::sort(control_edges->begin(), control_edges->end());
if (n->op_def().is_commutative()) {
std::sort(in->begin(), in->end());
}
}
static size_t kIllegalNodeHash = 0;
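// Incrementally hashes strings, integers, and protos. Protos are serialized
// deterministically through HashingOutputStream so attribute values hash
// consistently without materializing the full serialized string.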
class Hasher {
public:
uint64 hash() { return h_ == kIllegalNodeHash ? kIllegalNodeHash + 1 : h_; }
void MixString(const string& s) { h_ = Hash64(s.data(), s.size(), h_); }
void MixInteger(size_t z) { h_ = Hash64Combine(h_, z); }
void MixProto(const protobuf::MessageLite& msg) {
msg.ByteSizeLong();
HashingOutputStream hasher;
{
protobuf::io::CodedOutputStream stream(&hasher);
stream.EnableAliasing(true);
stream.SetSerializationDeterministic(true);
msg.SerializeWithCachedSizes(&stream);
}
h_ = Hash64Combine(h_, hasher.hash());
}
private:
class HashingOutputStream : public protobuf::io::ZeroCopyOutputStream {
public:
static constexpr size_t kBufSize = 228;
static constexpr uint64 kDefaultSeed = 2570847921467975139ULL;
bool Next(void** data, int* size) override {
if (i_ == kBufSize) {
Mix(buf_, kBufSize);
*data = buf_;
*size = kBufSize;
} else {
*data = buf_ + i_;
*size = kBufSize - i_;
}
i_ = kBufSize;
return true;
}
void BackUp(int count) override { i_ -= count; }
int64_t ByteCount() const override { return byte_count_; }
bool WriteAliasedRaw(const void* void_data, int size) override {
const char* data = static_cast<const char*>(void_data);
const auto remaining = kBufSize - i_;
if (remaining > 0) {
if (size < remaining) {
memcpy(buf_ + i_, data, size);
i_ += size;
return true;
}
memcpy(buf_ + i_, data, remaining);
i_ = kBufSize;
data += remaining;
size -= remaining;
}
if (i_ == kBufSize) {
Mix(buf_, kBufSize);
i_ = 0;
}
while (size >= kBufSize) {
Mix(data, kBufSize);
data += kBufSize;
size -= kBufSize;
}
memcpy(buf_, data, size);
i_ = size;
return true;
}
bool AllowsAliasing() const override { return true; }
uint64 hash() {
if (i_ != 0) {
Mix(buf_, i_);
i_ = 0;
}
return h_;
}
private:
void Mix(const char* p, size_t n) {
byte_count_ += n;
h_ = Hash64(p, n, h_);
}
char buf_[kBufSize];
int i_ = 0;
int64_t byte_count_ = 0;
uint64 h_ = kDefaultSeed;
};
uint64 h_ = HashingOutputStream::kDefaultSeed;
};
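// The node hash covers the op type, output dtypes, input edges (source node id
// and output slot), and, except on Android, all attr name/value pairs combined
// in an order-independent way.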
size_t OptimizerCSE::NodeHash(const Node* n) {
Hasher hasher;
hasher.MixString(n->type_string());
hasher.MixInteger(n->output_types().size());
for (DataType dt : n->output_types()) {
hasher.MixInteger(dt);
}
hasher.MixInteger(n->num_inputs());
absl::InlinedVector<const Node*, 4UL> control_edges;
absl::InlinedVector<std::pair<const Node*, int>, 4UL> in(n->num_inputs());
FillInputs(n, &control_edges, &in);
for (const auto& edge : in) {
hasher.MixInteger(edge.first->id());
hasher.MixInteger(edge.second);
}
#if !defined(__ANDROID__)
size_t attr_hashes = 0;
for (const auto& attr : n->attrs()) {
Hasher h;
h.MixString(attr.first);
h.MixProto(attr.second);
attr_hashes = Hash64CombineUnordered(attr_hashes, h.hash());
}
hasher.MixInteger(attr_hashes);
#endif
return hasher.hash();
}
static bool HasRefInput(const Node* n) {
for (auto dt : n->input_types()) {
if (IsRefType(dt)) return true;
}
return false;
}
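// Two nodes are candidates for merging only if they have the same op type, are
// stateless, take no ref inputs, and have identical attrs, data inputs, and
// control inputs.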
bool OptimizerCSE::Equivalent(const Node* a, const Node* b,
AttrSlice::Scratch* scratch) {
if (a->type_string() != b->type_string()) return false;
if (a->op_def().is_stateful()) return false;
if (HasRefInput(a) || HasRefInput(b)) return false;
if (!a->attrs().EqualAttrs(b->attrs(), scratch)) return false;
if (a->num_inputs() != b->num_inputs()) return false;
const int N_in = a->num_inputs();
absl::InlinedVector<const Node*, 4UL> a_control_edges;
absl::InlinedVector<const Node*, 4UL> b_control_edges;
absl::InlinedVector<std::pair<const Node*, int>, 4UL> a_in(N_in);
absl::InlinedVector<std::pair<const Node*, int>, 4UL> b_in(N_in);
FillInputs(a, &a_control_edges, &a_in);
FillInputs(b, &b_control_edges, &b_in);
if (a_in != b_in) return false;
if (a_control_edges != b_control_edges) return false;
return true;
}
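// Processes nodes in reverse post order so producers are canonicalized before
// their consumers; the first node seen for a given hash becomes the canonical
// copy, and later nodes that pass Equivalent() are rewired to it and removed.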
bool OptimizerCSE::Optimize(
const std::function<bool(const Node*)>& consider_fn) {
std::vector<Node*> order;
GetReversePostOrder(*g_, &order, NodeComparatorID());
std::unordered_map<size_t, Node*> available;
bool changed = false;
AttrSlice::Scratch scratch;
for (Node* n : order) {
if (!n->IsOp()) continue;
if (n->type_string() == "Placeholder" ||
n->type_string() == "PlaceholderV2" ||
n->type_string() == "PlaceholderWithDefault") {
continue;
}
if (consider_fn != nullptr && !consider_fn(n)) continue;
size_t h = NodeHash(n);
Node** candidate = &available[h];
if (*candidate == nullptr) {
*candidate = n;
} else if (Equivalent(*candidate, n, &scratch)) {
VLOG(1) << "CSE: equivalent: " << (*candidate)->name() << " and "
<< n->name();
for (const Edge* e : n->out_edges()) {
g_->AddEdge(*candidate, e->src_output(), e->dst(), e->dst_input());
}
MergeDebugInfo(NodeDebugInfo(*n), *candidate);
g_->RemoveNode(n);
changed = true;
}
}
return changed;
}
bool OptimizeCSE(Graph* g,
const std::function<bool(const Node*)>& consider_fn) {
OptimizerCSE opt(g);
return opt.Optimize(consider_fn);
}
} | #include "tensorflow/core/graph/optimizer_cse.h"
#include <algorithm>
#include <set>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace {
static void InitGraph(const string& s, Graph* graph) {
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(s, &graph_def)) << s;
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, graph));
}
class OptimizerCSETest : public ::testing::Test {
public:
OptimizerCSETest() : graph_(OpRegistry::Global()) {}
void InitGraph(const string& s) {
::tensorflow::InitGraph(s, &graph_);
original_ = CanonicalGraphString(&graph_);
}
static bool IncludeNode(const Node* n) { return n->IsOp(); }
static string EdgeId(const Node* n, int index) {
if (index == 0) {
return n->name();
} else if (index == Graph::kControlSlot) {
return strings::StrCat(n->name(), ":control");
} else {
return strings::StrCat(n->name(), ":", index);
}
}
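  // Renders the graph as sorted "node(op);...|src->dst;..." text so graphs can
  // be compared by value in EXPECT_EQ.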
string CanonicalGraphString(Graph* g) {
std::vector<string> nodes;
std::vector<string> edges;
for (const Node* n : g->nodes()) {
if (IncludeNode(n)) {
nodes.push_back(strings::StrCat(n->name(), "(", n->type_string(), ")"));
}
}
for (const Edge* e : g->edges()) {
if (IncludeNode(e->src()) && IncludeNode(e->dst())) {
edges.push_back(strings::StrCat(EdgeId(e->src(), e->src_output()), "->",
EdgeId(e->dst(), e->dst_input())));
}
}
std::sort(nodes.begin(), nodes.end());
std::sort(edges.begin(), edges.end());
return strings::StrCat(absl::StrJoin(nodes, ";"), "|",
absl::StrJoin(edges, ";"));
}
string DoCSE(const std::function<bool(const Node*)>& consider_fn = nullptr) {
string before = CanonicalGraphString(&graph_);
LOG(ERROR) << "Before rewrites: " << before;
OptimizeCSE(&graph_, consider_fn);
string result = CanonicalGraphString(&graph_);
LOG(ERROR) << "After rewrites: " << result;
return result;
}
const string& OriginalGraph() const { return original_; }
Graph graph_;
string original_;
};
REGISTER_OP("Input").Output("o: float").SetIsStateful();
TEST_F(OptimizerCSETest, Simple) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
TEST_F(OptimizerCSETest, Simple_ThreeEquivalent) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'E' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
TEST_F(OptimizerCSETest, Simple_WithFixups) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'E' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['C', 'D'] }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul);E(Mul)|"
"A->C;B->C:1;C->E;C->E:1");
}
TEST_F(OptimizerCSETest, Simple_Commutative) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['B', 'A'] }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
static bool IsNotMultiply(const Node* n) { return n->type_string() != "Mul"; }
TEST_F(OptimizerCSETest, Simple_Filtered) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['B', 'A'] }");
EXPECT_EQ(DoCSE(IsNotMultiply), OriginalGraph());
}
TEST_F(OptimizerCSETest, Simple_NotCommutative) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Sub' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Sub' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['B', 'A'] }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, NotEquivalent_Ops) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Sub' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, Simple_SameOps_SameAttrs1) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] attr { key: 'shape'"
" value { shape: { dim: { size: 37 name: 'SAME_NAME' } } } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] attr { key: 'shape'"
" value { shape: { dim: { size: 37 name: 'SAME_NAME' } } } } }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
TEST_F(OptimizerCSETest, Simple_SameOps_SameAttrs2) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 'a' value { i: 3 } }"
" attr { key: 't' value { type: DT_INT32 } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 't' value { type: DT_INT32 } }"
" attr { key: 'a' value { i: 3 } } }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
TEST_F(OptimizerCSETest, SameConstants) {
InitGraph(
"node { name: 'A' op: 'Const' "
" attr { key: 'dtype' value { type: DT_INT32 } }"
" attr { key: 'value' value {"
" tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
" int_val: 0 } } } }"
"node { name: 'B' op: 'Const' "
" attr { key: 'dtype' value { type: DT_INT32 } }"
" attr { key: 'value' value {"
" tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
" int_val: 0 } } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_INT32 } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(),
"A(Const);D(Mul)|"
"A->D;A->D:1");
}
TEST_F(OptimizerCSETest, DifferentConstants) {
InitGraph(
"node { name: 'A' op: 'Const' "
" attr { key: 'dtype' value { type: DT_INT32 } }"
" attr { key: 'value' value {"
" tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
" int_val: 0 } } } }"
"node { name: 'B' op: 'Const' "
" attr { key: 'dtype' value { type: DT_INT32 } }"
" attr { key: 'value' value {"
" tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
" int_val: 100000 } } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_INT32 } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(),
"A(Const);B(Const);D(Mul)|"
"A->D;B->D:1");
}
TEST_F(OptimizerCSETest, SameOps_DifferentAttrs1) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 'a' value { i: 3 } }"
" attr { key: 't' value { type: DT_INT32 } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 't' value { type: DT_INT32 } }"
" attr { key: 'a' value { i: 4 } } }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, SameOps_DifferentAttrs2) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 'a' value { i: 3 } }"
" attr { key: 't' value { type: DT_FLOAT } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 't' value { type: DT_INT32 } }"
" attr { key: 'a' value { i: 3 } } }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, NotEquivalent_Inputs) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Input'}"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'E' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'C'] }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, Constant_Dedup) {
Tensor a(DT_FLOAT, TensorShape({1}));
a.flat<float>()(0) = 1.0;
Tensor b(DT_DOUBLE, TensorShape({1}));
b.flat<double>()(0) = 1.0;
Tensor c(DT_FLOAT, TensorShape({1, 1}));
c.flat<float>()(0) = 1.0;
Tensor d(DT_FLOAT, TensorShape({1}));
d.flat<float>()(0) = 2.0;
Graph g(OpRegistry::Global());
for (const auto& val : {a, b, c, d, d, c, b, a}) {
test::graph::Constant(&g, val);
}
GraphDef gdef;
test::graph::ToGraphDef(&g, &gdef);
InitGraph(tsl::LegacyUnredactedDebugString(gdef));
EXPECT_EQ(OriginalGraph(),
"n/_0(Const);n/_1(Const);n/_2(Const);n/_3(Const);"
"n/_4(Const);n/_5(Const);n/_6(Const);n/_7(Const)|");
std::vector<string> nodes = str_util::Split(DoCSE(), ";|");
std::set<string> node_set(nodes.begin(), nodes.end());
EXPECT_EQ(node_set.count("n/_0(Const)") + node_set.count("n/_7(Const)"), 1);
EXPECT_EQ(node_set.count("n/_1(Const)") + node_set.count("n/_6(Const)"), 1);
EXPECT_EQ(node_set.count("n/_2(Const)") + node_set.count("n/_5(Const)"), 1);
EXPECT_EQ(node_set.count("n/_3(Const)") + node_set.count("n/_4(Const)"), 1);
}
void BM_CSE(::testing::benchmark::State& state) {
const int op_nodes = state.range(0);
string s;
for (int in = 0; in < 10; in++) {
s += strings::Printf("node { name: 'in%04d' op: 'Input'}", in);
}
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
for (int op = 0; op < op_nodes; op++) {
s += strings::Printf(
"node { name: 'op%04d' op: 'Mul' attr { key: 'T' value { "
"type: DT_FLOAT } } input: ['in%04d', 'in%04d' ] }",
op, rnd.Uniform(10), rnd.Uniform(10));
}
bool first = true;
for (auto i : state) {
state.PauseTiming();
Graph* graph = new Graph(OpRegistry::Global());
InitGraph(s, graph);
int N = graph->num_node_ids();
if (first) {
state.SetLabel(strings::StrCat("Per graph node. Nodes: ", N));
first = false;
}
{
state.ResumeTiming();
OptimizeCSE(graph, nullptr);
state.PauseTiming();
}
delete graph;
state.ResumeTiming();
}
}
BENCHMARK(BM_CSE)->Arg(1000)->Arg(10000);
}
} |
1,438 | cpp | tensorflow/tensorflow | collective_order | tensorflow/core/graph/collective_order.cc | tensorflow/core/graph/collective_order_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_COLLECTIVE_ORDER_H_
#define TENSORFLOW_CORE_GRAPH_COLLECTIVE_ORDER_H_
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
enum class GraphCollectiveOrder { kNone, kEdges, kAttrs };
Status OrderCollectives(Graph* graph, GraphCollectiveOrder order_type);
}
#endif
#include "tensorflow/core/graph/collective_order.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/graph/algorithm.h"
namespace tensorflow {
namespace {
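// Collects all CollectiveReduce nodes and their instance keys, and for every
// node records the set of collective instance keys it transitively depends on
// (propagated from producers to consumers during the reverse DFS).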
Status DiscoverDataDependencies(
const Graph* graph, std::vector<Node*>* collective_nodes,
std::vector<int32>* instance_keys,
absl::flat_hash_map<Node*, absl::flat_hash_set<int32>>* data_dependencies) {
Status s;
auto node_leave = [collective_nodes, instance_keys, data_dependencies,
&s](Node* node) {
int32_t instance_key;
bool enter_node =
node->IsCollective() && node->type_string() == "CollectiveReduce";
if (enter_node) {
Status get_attr_status =
GetNodeAttr(node->attrs(), "instance_key", &instance_key);
s.Update(get_attr_status);
collective_nodes->push_back(node);
instance_keys->push_back(instance_key);
VLOG(2) << "collective node " << node->DebugString();
}
data_dependencies->reserve(data_dependencies->size() + 1 +
node->out_edges().size());
const auto& node_deps = (*data_dependencies)[node];
for (const Edge* out_edge : node->out_edges()) {
auto& child_deps = (*data_dependencies)[out_edge->dst()];
child_deps.insert(node_deps.begin(), node_deps.end());
if (enter_node && s.ok()) {
child_deps.insert(instance_key);
}
}
};
ReverseDFS(*graph, nullptr, node_leave);
return s;
}
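// For each pair of collectives on the same device with no data dependency in
// either direction, adds an ordering edge from the higher instance key to the
// lower one, then removes edges that are already implied transitively.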
Status CreateControlDependencies(
const std::vector<Node*>& collective_nodes,
const std::vector<int32>& instance_keys,
absl::flat_hash_map<Node*, absl::flat_hash_set<int32>>* data_dependencies,
absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>>* dependency_edges) {
absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>> all_paths;
for (int i = 0; i < collective_nodes.size() - 1; i++) {
if (!collective_nodes[i]->IsCollective() ||
collective_nodes[i]->type_string() != "CollectiveReduce") {
return errors::Internal("Unexpected node ",
collective_nodes[i]->DebugString());
}
const auto& deps_i = (*data_dependencies)[collective_nodes[i]];
for (int j = i + 1; j < collective_nodes.size(); j++) {
if (collective_nodes[i]->requested_device() !=
collective_nodes[j]->requested_device()) {
continue;
}
if (instance_keys[i] == instance_keys[j]) {
return errors::Internal("Unexpected same instance_key ",
instance_keys[i],
" on 2 nodes with the same device ",
collective_nodes[i]->requested_device());
}
const auto& deps_j = (*data_dependencies)[collective_nodes[j]];
if (deps_i.find(instance_keys[j]) == deps_i.end() &&
deps_j.find(instance_keys[i]) == deps_j.end()) {
int src_idx = instance_keys[i] > instance_keys[j] ? i : j;
int dst_idx = instance_keys[i] > instance_keys[j] ? j : i;
Node* src_node = collective_nodes[src_idx];
Node* dst_node = collective_nodes[dst_idx];
VLOG(1) << "Adding control dependency from node " << src_node->name()
<< " instance " << instance_keys[src_idx] << " to node "
<< dst_node->name() << " instance " << instance_keys[dst_idx];
(*dependency_edges)[src_node].insert(dst_node);
auto& src_paths = all_paths[src_node];
src_paths.insert(dst_node);
for (Node* downstream_node : all_paths[dst_node]) {
src_paths.insert(downstream_node);
}
}
}
}
for (int i = 0; i < collective_nodes.size(); ++i) {
Node* node = collective_nodes[i];
auto& neighbor_set = (*dependency_edges)[node];
std::vector<Node*> neighbor_list(neighbor_set.begin(), neighbor_set.end());
for (int j = 0; j < neighbor_list.size(); ++j) {
Node* n1 = neighbor_list[j];
if (n1 == nullptr) continue;
auto& n1_paths = all_paths[n1];
for (int k = 0; k < neighbor_list.size(); ++k) {
Node* n2 = neighbor_list[k];
if (j == k || n2 == nullptr) continue;
if (n1_paths.find(n2) != n1_paths.end()) {
neighbor_set.erase(n2);
neighbor_list[k] = nullptr;
}
}
}
}
return absl::OkStatus();
}
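// Materializes the computed ordering either as explicit control edges (kEdges)
// or as a "wait_for" attr listing the instance keys each collective must wait
// for (kAttrs).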
Status InsertControlDependencies(
Graph* graph, GraphCollectiveOrder order_type,
const absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>>&
dependency_edges) {
if (order_type == GraphCollectiveOrder::kEdges) {
for (const auto& pair : dependency_edges) {
Node* src_node = pair.first;
for (Node* dst_node : pair.second) {
graph->AddControlEdge(src_node, dst_node);
}
}
} else if (order_type == GraphCollectiveOrder::kAttrs) {
absl::flat_hash_map<Node*, absl::flat_hash_set<int32>> wait_for;
for (const auto& pair : dependency_edges) {
int32_t src_instance;
TF_RETURN_IF_ERROR(
GetNodeAttr(pair.first->attrs(), "instance_key", &src_instance));
for (Node* dst_node : pair.second) {
wait_for[dst_node].insert(src_instance);
}
}
for (const auto& pair : wait_for) {
std::vector<int32> wait_for_list(pair.second.begin(), pair.second.end());
pair.first->ClearAttr("wait_for");
pair.first->AddAttr("wait_for", wait_for_list);
}
} else {
return errors::Internal("Unexpected GraphCollectiveOrder type ",
static_cast<int>(order_type));
}
return absl::OkStatus();
}
}
Status OrderCollectives(Graph* graph, GraphCollectiveOrder order_type) {
std::vector<Node*> collective_nodes;
std::vector<int32> instance_keys;
absl::flat_hash_map<Node*, absl::flat_hash_set<int32>> data_dependencies;
TF_RETURN_IF_ERROR(DiscoverDataDependencies(
graph, &collective_nodes, &instance_keys, &data_dependencies));
if (collective_nodes.empty()) return absl::OkStatus();
absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>> dependency_edges;
TF_RETURN_IF_ERROR(CreateControlDependencies(
collective_nodes, instance_keys, &data_dependencies, &dependency_edges));
return InsertControlDependencies(graph, order_type, dependency_edges);
}
} | #include "tensorflow/core/graph/collective_order.h"
#include <gmock/gmock.h>
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::testing::UnorderedElementsAreArray;
REGISTER_OP("TestParams").Output("o: float");
void VerifyGraph(const Graph& graph,
const std::vector<string>& expected_collective_nodes,
const std::vector<std::pair<string, string>>&
expected_collective_control_edges) {
std::vector<string> actual_collective_nodes;
std::vector<std::pair<string, string>> actual_collective_control_edges;
for (const Node* src : graph.nodes()) {
if (!src->IsCollective()) {
continue;
}
actual_collective_nodes.push_back(src->name());
for (const Edge* edge : src->out_edges()) {
VLOG(2) << "collective edge " << edge->src()->name() << " -> "
<< edge->dst()->name();
if (!edge->IsControlEdge() || edge->dst()->name() == "_SINK") {
continue;
}
actual_collective_control_edges.emplace_back(src->name(),
edge->dst()->name());
}
}
EXPECT_THAT(actual_collective_nodes,
UnorderedElementsAreArray(expected_collective_nodes));
EXPECT_THAT(actual_collective_control_edges,
UnorderedElementsAreArray(expected_collective_control_edges));
}
void VerifyAttrs(
const Graph& graph,
const std::unordered_map<string, std::vector<int32>> wait_for_map) {
for (const Node* node : graph.nodes()) {
if (node->IsCollective() ||
wait_for_map.find(node->name()) == wait_for_map.end()) {
continue;
}
std::vector<int32> wait_for_actual;
TF_EXPECT_OK(GetNodeAttr(node->attrs(), "wait_for", &wait_for_actual));
auto wait_for_expected = wait_for_map.at(node->name());
EXPECT_THAT(wait_for_actual, UnorderedElementsAreArray(wait_for_expected));
}
}
Node* CollectiveReduceNode(GraphDefBuilder* builder, Node* input,
const string& name, const string& device,
int instance_key) {
Node* collective_node =
ops::UnaryOp("CollectiveReduce", input,
builder->opts()
.WithName(name)
.WithDevice(device)
.WithAttr("T", DT_FLOAT)
.WithAttr("group_size", 2)
.WithAttr("group_key", 1)
.WithAttr("instance_key", instance_key)
.WithAttr("merge_op", "Add")
.WithAttr("final_op", "Id")
.WithAttr("subdiv_offsets", {1}));
return collective_node;
}
std::unique_ptr<Graph> InitGraph() {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
const string dev0 = "/job:localhost/replica:0/task:0/device:CPU:0";
const string dev1 = "/job:localhost/replica:0/task:0/device:CPU:1";
Node* a = ops::SourceOp("TestParams",
builder.opts().WithName("a").WithDevice(dev0));
Node* b = ops::SourceOp("TestParams",
builder.opts().WithName("b").WithDevice(dev1));
Node* c1_0 = CollectiveReduceNode(&builder, a, "c1_0", dev0, 1);
Node* c1_1 = CollectiveReduceNode(&builder, b, "c1_1", dev1, 1);
Node* id0 = ops::UnaryOp(
"Identity", c1_0,
builder.opts().WithName("id0").WithDevice(dev0).WithAttr("T", DT_FLOAT));
Node* id1 = ops::UnaryOp(
"Identity", c1_1,
builder.opts().WithName("id1").WithDevice(dev1).WithAttr("T", DT_FLOAT));
CollectiveReduceNode(&builder, id0, "c2_0", dev0, 2);
CollectiveReduceNode(&builder, id1, "c2_1", dev1, 2);
CollectiveReduceNode(&builder, id0, "c3_0", dev0, 3);
CollectiveReduceNode(&builder, id1, "c3_1", dev1, 3);
std::unique_ptr<Graph> graph = absl::make_unique<Graph>(OpRegistry::Global());
Status s = GraphDefBuilderToGraph(builder, graph.get());
if (!s.ok()) {
LOG(FATAL) << "Error building graph " << s;
}
return graph;
}
TEST(CollectiveOrderTest, SimpleOrder) {
std::unique_ptr<Graph> graph = InitGraph();
TF_EXPECT_OK(OrderCollectives(graph.get(), GraphCollectiveOrder::kEdges));
VerifyGraph(*graph, {"c1_0", "c1_1", "c2_0", "c2_1", "c3_0", "c3_1"},
{{"c3_0", "c2_0"}, {"c3_1", "c2_1"}});
}
TEST(CollectiveOrderTest, SimpleOrderAttr) {
std::unique_ptr<Graph> graph = InitGraph();
TF_EXPECT_OK(OrderCollectives(graph.get(), GraphCollectiveOrder::kAttrs));
VerifyAttrs(*graph, {{"c2_0", {3}}, {"c2_1", {3}}});
}
std::unique_ptr<Graph> InitGraph2() {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
const string dev0 = "/job:localhost/replica:0/task:0/device:CPU:0";
Node* a = ops::SourceOp("TestParams",
builder.opts().WithName("a").WithDevice(dev0));
Node* c1 = CollectiveReduceNode(&builder, a, "c1", dev0, 1);
CollectiveReduceNode(&builder, c1, "c4", dev0, 4);
Node* id = ops::UnaryOp(
"Identity", c1,
builder.opts().WithName("id").WithDevice(dev0).WithAttr("T", DT_FLOAT));
CollectiveReduceNode(&builder, id, "c2", dev0, 2);
CollectiveReduceNode(&builder, id, "c3", dev0, 3);
std::unique_ptr<Graph> graph = absl::make_unique<Graph>(OpRegistry::Global());
Status s = GraphDefBuilderToGraph(builder, graph.get());
if (!s.ok()) {
LOG(FATAL) << "Error building graph " << s;
}
return graph;
}
TEST(CollectiveOrderTest, SimpleOrder2) {
std::unique_ptr<Graph> graph = InitGraph2();
TF_EXPECT_OK(OrderCollectives(graph.get(), GraphCollectiveOrder::kEdges));
VerifyGraph(*graph, {"c1", "c2", "c3", "c4"}, {{"c4", "c3"}, {"c3", "c2"}});
}
std::unique_ptr<Graph> InitGraphForPruning() {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
const string dev0 = "/job:localhost/replica:0/task:0/device:CPU:0";
Node* w = ops::SourceOp("TestParams",
builder.opts().WithName("w").WithDevice(dev0));
Node* x = ops::SourceOp("TestParams",
builder.opts().WithName("x").WithDevice(dev0));
Node* y = ops::SourceOp("TestParams",
builder.opts().WithName("y").WithDevice(dev0));
Node* z = ops::SourceOp("TestParams",
builder.opts().WithName("z").WithDevice(dev0));
CollectiveReduceNode(&builder, w, "c1", dev0, 1);
CollectiveReduceNode(&builder, x, "c2", dev0, 2);
CollectiveReduceNode(&builder, y, "c3", dev0, 3);
CollectiveReduceNode(&builder, z, "c4", dev0, 4);
std::unique_ptr<Graph> graph = absl::make_unique<Graph>(OpRegistry::Global());
Status s = GraphDefBuilderToGraph(builder, graph.get());
if (!s.ok()) {
LOG(FATAL) << "Error building graph " << s;
}
return graph;
}
TEST(CollectiveOrderTest, Pruning) {
std::unique_ptr<Graph> graph = InitGraphForPruning();
TF_EXPECT_OK(OrderCollectives(graph.get(), GraphCollectiveOrder::kAttrs));
VerifyAttrs(*graph, {{"c3", {4}}, {"c2", {3}}, {"c1", {2}}});
}
}
} |
1,439 | cpp | tensorflow/tensorflow | tensor_id | tensorflow/core/graph/tensor_id.cc | tensorflow/core/graph/tensor_id_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_TENSOR_ID_H_
#define TENSORFLOW_CORE_GRAPH_TENSOR_ID_H_
#include <string>
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
struct SafeTensorId;
struct TensorId : public std::pair<StringPiece, int> {
typedef std::pair<StringPiece, int> Base;
using Base::pair;
TensorId() : Base() {}
TensorId(const SafeTensorId& id);
const StringPiece node() const { return first; }
int index() const { return second; }
string ToString() const {
if (second == Graph::kControlSlot) return strings::StrCat("^", first);
return strings::StrCat(first, ":", second);
}
struct Hasher {
public:
std::size_t operator()(const TensorId& x) const {
return Hash32(x.first.data(), x.first.size(), x.second);
}
};
};
TensorId ParseTensorName(const string& name);
TensorId ParseTensorName(StringPiece name);
bool IsTensorIdControl(const TensorId& tensor_id);
struct SafeTensorId : public std::pair<string, int> {
typedef std::pair<string, int> Base;
SafeTensorId() : Base() {}
SafeTensorId(const string& str, int idx) : Base(str, idx) {}
SafeTensorId(const TensorId& id);
const string& node() const { return first; }
int index() const { return second; }
string ToString() const {
if (second == Graph::kControlSlot) return strings::StrCat("^", first);
return strings::StrCat(first, ":", second);
}
struct Hasher {
public:
std::size_t operator()(const TensorId& x) const {
return Hash32(x.first.data(), x.first.size(), x.second);
}
};
};
}
#endif
#include "tensorflow/core/graph/tensor_id.h"
#include <string>
#include "absl/strings/match.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
TensorId::TensorId(const SafeTensorId& id) : TensorId(id.first, id.second) {}
SafeTensorId::SafeTensorId(const TensorId& id)
: SafeTensorId(string(id.first), id.second) {}
TensorId ParseTensorName(const string& name) {
return ParseTensorName(StringPiece(name.data(), name.size()));
}
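// Parses "name", "name:<digits>", or "^name". Digits are scanned from the end
// of the string so the common "name:index" form is split without extra
// allocation; "^name" maps to the control slot.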
TensorId ParseTensorName(StringPiece name) {
const char* base = name.data();
const char* p = base + name.size() - 1;
unsigned int index = 0;
unsigned int mul = 1;
while (p > base && (*p >= '0' && *p <= '9')) {
index += ((*p - '0') * mul);
mul *= 10;
p--;
}
TensorId id;
if (p > base && *p == ':' && mul > 1) {
id.first = StringPiece(base, p - base);
id.second = index;
} else if (absl::StartsWith(name, "^")) {
id.first = StringPiece(base + 1);
id.second = Graph::kControlSlot;
} else {
id.first = name;
id.second = 0;
}
return id;
}
bool IsTensorIdControl(const TensorId& tensor_id) {
return tensor_id.index() == Graph::kControlSlot;
}
} | #include "tensorflow/core/graph/tensor_id.h"
#include <vector>
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
string ParseHelper(const string& n) { return ParseTensorName(n).ToString(); }
TEST(TensorIdTest, ParseTensorName) {
EXPECT_EQ(ParseHelper("W1"), "W1:0");
EXPECT_EQ(ParseHelper("W1:0"), "W1:0");
EXPECT_EQ(ParseHelper("weights:0"), "weights:0");
EXPECT_EQ(ParseHelper("W1:1"), "W1:1");
EXPECT_EQ(ParseHelper("W1:17"), "W1:17");
EXPECT_EQ(ParseHelper("xyz1_17"), "xyz1_17:0");
EXPECT_EQ(ParseHelper("^foo"), "^foo");
}
uint32 Skewed(random::SimplePhilox* rnd, int max_log) {
const uint32 space = 1 << (rnd->Rand32() % (max_log + 1));
return rnd->Rand32() % space;
}
void BM_ParseTensorName(::testing::benchmark::State& state) {
const int arg = state.range(0);
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
std::vector<string> names;
for (int i = 0; i < 100; i++) {
string name;
switch (arg) {
case 0: {
size_t len = Skewed(&rnd, 4);
while (name.size() < len) {
name += rnd.OneIn(4) ? '0' : 'a';
}
if (rnd.OneIn(3)) {
strings::StrAppend(&name, ":", rnd.Uniform(12));
}
break;
}
case 1:
name = "W1";
break;
case 2:
name = "t0003";
break;
case 3:
name = "weights";
break;
case 4:
name = "weights:17";
break;
case 5:
name = "^weights";
break;
default:
LOG(FATAL) << "Unexpected arg";
break;
}
names.push_back(name);
}
TensorId id;
int index = 0;
int sum = 0;
for (auto s : state) {
id = ParseTensorName(names[index++ % names.size()]);
sum += id.second;
}
VLOG(2) << sum;
}
BENCHMARK(BM_ParseTensorName)->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(5);
TEST(TensorIdTest, IsTensorIdControl) {
string input = "^foo";
TensorId tensor_id = ParseTensorName(input);
EXPECT_TRUE(IsTensorIdControl(tensor_id));
input = "foo";
tensor_id = ParseTensorName(input);
EXPECT_FALSE(IsTensorIdControl(tensor_id));
input = "foo:2";
tensor_id = ParseTensorName(input);
EXPECT_FALSE(IsTensorIdControl(tensor_id));
}
TEST(TensorIdTest, PortZero) {
for (string input : {"foo", "foo:0"}) {
TensorId tensor_id = ParseTensorName(input);
EXPECT_EQ("foo", tensor_id.node());
EXPECT_EQ(0, tensor_id.index());
}
}
}
} |
1,440 | cpp | tensorflow/tensorflow | graph_debug_info_builder | tensorflow/core/graph/graph_debug_info_builder.cc | tensorflow/core/graph/graph_debug_info_builder_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_GRAPH_DEBUG_INFO_BUILDER_H_
#define TENSORFLOW_CORE_GRAPH_GRAPH_DEBUG_INFO_BUILDER_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/platform/stack_frame.h"
#include "tsl/platform/macros.h"
namespace tensorflow {
class AbstractStackTrace {
public:
struct TracePrintingOptions {
bool show_line_contents = false;
bool filter_common_prefix = false;
bool drop_internal_frames = false;
};
virtual ~AbstractStackTrace() = default;
virtual absl::Span<StackFrame const> ToFrames() const = 0;
virtual StackFrame LastUserFrame() const = 0;
virtual std::vector<StackFrame> GetUserFrames(int limit) const = 0;
virtual std::string ToString(const TracePrintingOptions& opts) const = 0;
};
class FrozenStackTrace : public AbstractStackTrace {
public:
explicit FrozenStackTrace(absl::Span<StackFrame const> frames,
absl::Span<StackFrame const> user_frames = {});
explicit FrozenStackTrace(std::vector<StackFrame>&& frames)
: frames_(std::move(frames)), user_frames_({}) {}
FrozenStackTrace(FrozenStackTrace&&) = default;
FrozenStackTrace(const GraphDebugInfo::StackTrace& stack_trace,
const GraphDebugInfo& debug_info);
~FrozenStackTrace() override = default;
absl::Span<StackFrame const> ToFrames() const override;
StackFrame LastUserFrame() const override;
std::vector<StackFrame> GetUserFrames(int limit) const override;
std::string ToString(const TracePrintingOptions& opts) const override;
private:
std::vector<StackFrame> frames_;
std::vector<StackFrame> user_frames_;
};
struct StackTracePointer {
std::shared_ptr<AbstractStackTrace> trace;
template <class H>
friend H AbslHashValue(H h, const StackTracePointer& p) {
for (const auto& frame : p.trace->ToFrames()) {
h = H::combine(std::move(h), frame);
}
return h;
}
bool operator==(const StackTracePointer& other) const {
absl::Span<StackFrame const> other_frames = other.trace->ToFrames();
absl::Span<StackFrame const> frames = trace->ToFrames();
return frames == other_frames;
}
};
using StackTracesMap =
absl::flat_hash_map<std::string,
std::shared_ptr<tensorflow::AbstractStackTrace>>;
StackTracesMap LoadTracesFromDebugInfo(const GraphDebugInfo& debug_info);
absl::StatusOr<StackTracesMap> LoadTracesFromDebugInfoStr(
absl::string_view debug_info_str);
GraphDebugInfo StackTracesMapToGraphDebugInfo(const StackTracesMap& map,
bool user_frames = true);
class GraphDebugInfoBuilder {
public:
struct Options {
bool user_frames;
int user_frames_limit;
};
GraphDebugInfoBuilder();
virtual ~GraphDebugInfoBuilder() = default;
void AccumulateStackTracesMap(const StackTracesMap& stack_traces_map,
absl::string_view key_suffix = "",
const GraphDebugInfoBuilder::Options& options =
GraphDebugInfoBuilder::Options());
void AccumulateStackTrace(std::shared_ptr<AbstractStackTrace> trace,
absl::string_view traces_key,
const GraphDebugInfoBuilder::Options& options =
GraphDebugInfoBuilder::Options());
void AppendGraphDebugInfo(absl::string_view prefix,
const GraphDebugInfo& new_info);
absl::Status AppendGraphDebugInfoStr(absl::string_view prefix,
absl::string_view new_info_str);
std::string ToGraphDebugInfoStr() const;
GraphDebugInfo Build() const;
private:
void AppendToStackTraceProto(const StackFrame& stack_frame,
GraphDebugInfo::StackTrace& stack_trace_proto);
std::unique_ptr<GraphDebugInfo> debug_info_;
absl::flat_hash_map<std::string, int> file_name_to_index_;
absl::flat_hash_map<StackTracePointer, int> trace_to_index_;
absl::flat_hash_map<StackFrame, int> frame_to_index_;
int new_name_index_ = 0;
GraphDebugInfoBuilder(const GraphDebugInfoBuilder&) = delete;
void operator=(const GraphDebugInfoBuilder&) = delete;
};
}
#endif
#include "tensorflow/core/graph/graph_debug_info_builder.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/hash/hash.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/logging.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stack_frame.h"
#include "tsl/platform/path.h"
namespace tensorflow {
static const char* kFilenameToIgnorePrefix = "<embedded";
std::string StackFrameToString(const StackFrame& frame,
int shared_prefix_length) {
std::string out = absl::StrFormat(
"File \"%s\", line %d, in %s",
absl::StrContains(frame.file_name, kFilenameToIgnorePrefix)
? frame.file_name
: frame.file_name.substr(shared_prefix_length),
frame.line_number, frame.function_name);
return out;
}
std::string ToStringHelper(absl::Span<const StackFrame> stack_frames,
int shared_prefix_length) {
return absl::StrJoin(
stack_frames, "\n", [&](std::string* out, const StackFrame& frame) {
absl::StrAppend(out, StackFrameToString(frame, shared_prefix_length));
});
}
FrozenStackTrace::FrozenStackTrace(absl::Span<StackFrame const> frames,
absl::Span<StackFrame const> user_frames)
: frames_(frames.begin(), frames.end()),
user_frames_(user_frames.begin(), user_frames.end()) {
if (user_frames.empty()) {
user_frames_ = frames_;
}
}
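// Rebuilds the frame list from a GraphDebugInfo proto, handling both the
// inline file_line_cols encoding and the newer frame_id indirection into
// frames_by_id.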
FrozenStackTrace::FrozenStackTrace(
const GraphDebugInfo::StackTrace& stack_trace,
const GraphDebugInfo& debug_info) {
auto push_frame = [this,
&debug_info](const GraphDebugInfo::FileLineCol& frame) {
int file_index = frame.file_index();
std::string file_name =
(file_index >= 0 && file_index < debug_info.files_size())
? debug_info.files(file_index)
: "<UNKNOWN_FILE_NAME>";
frames_.push_back(StackFrame(file_name, frame.line(), frame.func()));
};
if (!stack_trace.file_line_cols().empty()) {
for (const GraphDebugInfo::FileLineCol& frame :
stack_trace.file_line_cols()) {
push_frame(frame);
}
} else {
for (const uint64_t frame_id : stack_trace.frame_id()) {
if (debug_info.frames_by_id().contains(frame_id)) {
push_frame(debug_info.frames_by_id().at(frame_id));
} else {
LOG_FIRST_N(ERROR, 5) << "No matching frame for id:" << frame_id;
}
}
}
}
absl::Span<StackFrame const> FrozenStackTrace::ToFrames() const {
return frames_;
}
StackFrame FrozenStackTrace::LastUserFrame() const { return frames_.back(); }
std::vector<StackFrame> FrozenStackTrace::GetUserFrames(int limit) const {
std::vector<StackFrame> result;
if (limit < 0 || limit > user_frames_.size()) {
limit = user_frames_.size();
}
result.reserve(limit);
for (int i = 0; i < limit; ++i) {
result.push_back(user_frames_[i]);
}
return result;
}
std::string FrozenStackTrace::ToString(const TracePrintingOptions& opts) const {
int shared_prefix_length = 0;
if (opts.filter_common_prefix) {
std::vector<std::string> prefix_file_names;
for (const StackFrame& frame : frames_) {
if (!absl::StrContains(frame.file_name, kFilenameToIgnorePrefix)) {
prefix_file_names.push_back(frame.file_name);
}
}
shared_prefix_length = tsl::io::CommonPathPrefix(prefix_file_names).size();
}
if (!opts.drop_internal_frames) {
return ToStringHelper(frames_, shared_prefix_length);
}
std::vector<StackFrame> non_internal_frames;
for (const StackFrame& frame : frames_) {
if (!IsInternalFrameForFilename(frame.file_name)) {
non_internal_frames.push_back(frame);
}
}
return ToStringHelper(non_internal_frames, shared_prefix_length);
}
GraphDebugInfoBuilder::GraphDebugInfoBuilder()
: debug_info_(std::make_unique<GraphDebugInfo>()) {}
void GraphDebugInfoBuilder::AccumulateStackTracesMap(
const StackTracesMap& stack_traces_map, absl::string_view key_suffix,
const GraphDebugInfoBuilder::Options& options) {
trace_to_index_.reserve(trace_to_index_.size() + stack_traces_map.size());
for (const auto& [node_name, stack_trace] : stack_traces_map) {
if (stack_trace == nullptr) continue;
std::string trace_key = absl::StrCat(node_name, key_suffix);
AccumulateStackTrace(stack_trace, trace_key, options);
}
}
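// Deduplicates traces and frames: an already-seen stack trace reuses its
// existing trace id, and individual frames are shared across traces via
// frame_to_index_.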
void GraphDebugInfoBuilder::AccumulateStackTrace(
std::shared_ptr<AbstractStackTrace> trace, absl::string_view traces_key,
const GraphDebugInfoBuilder::Options& options) {
int trace_index = 0;
StackTracePointer p{trace};
auto found = trace_to_index_.find(p);
if (found != trace_to_index_.end()) {
trace_index = found->second;
} else {
trace_index = debug_info_->traces_by_id().size();
trace_to_index_[p] = trace_index;
GraphDebugInfo::StackTrace& stack_trace_proto =
(*debug_info_->mutable_traces_by_id())[trace_index];
if (options.user_frames) {
frame_to_index_.reserve(
frame_to_index_.size() +
trace->GetUserFrames(options.user_frames_limit).size());
for (const auto& stack_frame :
trace->GetUserFrames(options.user_frames_limit)) {
AppendToStackTraceProto(stack_frame, stack_trace_proto);
}
} else {
frame_to_index_.reserve(frame_to_index_.size() +
trace->ToFrames().size());
for (const auto& stack_frame : trace->ToFrames()) {
AppendToStackTraceProto(stack_frame, stack_trace_proto);
}
}
}
(*debug_info_->mutable_name_to_trace_id())[traces_key] = trace_index;
}
void GraphDebugInfoBuilder::AppendToStackTraceProto(
const StackFrame& stack_frame,
GraphDebugInfo::StackTrace& stack_trace_proto) {
int frame_index = 0;
auto found = frame_to_index_.find(stack_frame);
if (found != frame_to_index_.end()) {
frame_index = found->second;
} else {
frame_index = debug_info_->frames_by_id().size();
frame_to_index_[stack_frame] = frame_index;
GraphDebugInfo::FileLineCol& frame =
(*debug_info_->mutable_frames_by_id())[frame_index];
auto file_index = file_name_to_index_.find(stack_frame.file_name);
if (file_index != file_name_to_index_.end()) {
frame.set_file_index(file_index->second);
} else {
frame.set_file_index(new_name_index_);
file_name_to_index_[stack_frame.file_name] = new_name_index_;
*debug_info_->add_files() = stack_frame.file_name;
new_name_index_++;
}
frame.set_line(stack_frame.line_number);
frame.set_func(stack_frame.function_name);
}
stack_trace_proto.add_frame_id(frame_index);
}
void GraphDebugInfoBuilder::AppendGraphDebugInfo(
absl::string_view prefix, const GraphDebugInfo& new_info) {
for (const auto& pair : new_info.name_to_trace_id()) {
auto trace = new_info.traces_by_id().at(pair.second);
auto frozen = std::make_shared<FrozenStackTrace>(trace, new_info);
std::string key =
prefix.empty() ? pair.first : absl::StrCat(pair.first, "@", prefix);
AccumulateStackTrace(frozen, key, GraphDebugInfoBuilder::Options{});
}
}
GraphDebugInfo GraphDebugInfoBuilder::Build() const { return *debug_info_; }
absl::Status GraphDebugInfoBuilder::AppendGraphDebugInfoStr(
absl::string_view prefix, absl::string_view new_info_str) {
GraphDebugInfo debug_info;
if (!debug_info.ParseFromArray(new_info_str.data(), new_info_str.size())) {
return absl::InvalidArgumentError("Failed to parse GraphDebugInfo proto.");
}
AppendGraphDebugInfo(prefix, debug_info);
return absl::OkStatus();
}
std::string GraphDebugInfoBuilder::ToGraphDebugInfoStr() const {
return Build().SerializeAsString();
}
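// Reconstructs per-node stack traces from a GraphDebugInfo proto, supporting
// both the id-indexed traces_by_id map and the legacy name-keyed traces map.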
StackTracesMap LoadTracesFromDebugInfo(const GraphDebugInfo& debug_info) {
StackTracesMap traces;
absl::flat_hash_map<uint64_t, std::shared_ptr<AbstractStackTrace>>
traces_by_id;
traces_by_id.reserve(debug_info.traces_by_id_size());
for (const auto& [id, frames] : debug_info.traces_by_id()) {
traces_by_id[id] = std::make_shared<FrozenStackTrace>(frames, debug_info);
}
traces.reserve(debug_info.name_to_trace_id_size() + debug_info.traces_size());
for (const auto& [name, trace_id] : debug_info.name_to_trace_id()) {
if (!traces_by_id.contains(trace_id)) {
LOG_FIRST_N(ERROR, 5) << "No matching trace for id:" << trace_id;
continue;
}
traces[name] = traces_by_id[trace_id];
}
for (const auto& [name, frames] : debug_info.traces()) {
traces[name] = std::make_shared<FrozenStackTrace>(frames, debug_info);
}
return traces;
}
absl::StatusOr<StackTracesMap> LoadTracesFromDebugInfoStr(
absl::string_view debug_info_str) {
GraphDebugInfo debug_info;
if (!debug_info.ParseFromArray(debug_info_str.data(),
debug_info_str.size())) {
return absl::InvalidArgumentError("Failed to parse GraphDebugInfo proto.");
}
return LoadTracesFromDebugInfo(debug_info);
}
GraphDebugInfo StackTracesMapToGraphDebugInfo(const StackTracesMap& map,
bool user_frames) {
GraphDebugInfoBuilder builder;
GraphDebugInfoBuilder::Options options;
options.user_frames = user_frames;
options.user_frames_limit = -1;
builder.AccumulateStackTracesMap(map, "", options);
return builder.Build();
}
} | #include "tensorflow/core/graph/graph_debug_info_builder.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::testing::Eq;
using ::testing::Ne;
using ::testing::UnorderedElementsAre;
class TestStackTrace : public AbstractStackTrace {
public:
explicit TestStackTrace(const std::vector<StackFrame> frames)
: frames_(std::move(frames)) {}
absl::Span<StackFrame const> ToFrames() const override { return frames_; }
std::vector<StackFrame> GetUserFrames(int limit) const override {
return frames_;
}
StackFrame LastUserFrame() const override { return frames_.back(); }
string ToString(const TracePrintingOptions& opts) const override {
auto frame = LastUserFrame();
return absl::StrCat(frame.file_name, ":", frame.line_number, ":",
frame.function_name);
}
std::vector<StackFrame> frames_;
};
TEST(GraphDebugInfoBuilderTest, AccumulateStackTrace) {
auto stack_trace = std::make_shared<TestStackTrace>(
std::vector<StackFrame>{{"dummy_file_alpha.cc", 20, "function_bar"},
{"dummy_file_beta.cc", 30, "function_sop"}});
GraphDebugInfoBuilder builder;
builder.AccumulateStackTrace(stack_trace, "alpha_beta");
GraphDebugInfo debug_info = builder.Build();
EXPECT_THAT(debug_info.files(), UnorderedElementsAre("dummy_file_alpha.cc",
"dummy_file_beta.cc"));
EXPECT_THAT(debug_info.traces_by_id_size(), Eq(1));
EXPECT_THAT(debug_info.name_to_trace_id().find("alpha_beta"),
Ne(debug_info.name_to_trace_id().end()));
auto actual_stack_trace = debug_info.traces_by_id().at(
debug_info.name_to_trace_id().at("alpha_beta"));
EXPECT_THAT(actual_stack_trace.frame_id_size(), Eq(2))
<< debug_info.DebugString();
}
TEST(GraphDebugInfoBuilderTest, AccumulateStackTracesMap) {
StackTracesMap stack_traces;
stack_traces["two"] = std::make_shared<TestStackTrace>(
std::vector<StackFrame>{{"dummy_file_alpha.cc", 20, "function_bar"},
{"dummy_file_beta.cc", 30, "function_sop"}});
stack_traces["scale"] =
std::make_shared<TestStackTrace>(std::vector<StackFrame>{
{"dummy_file_alpha.cc", 10, "function_foo"},
{"dummy_file_beta.cc", 30, "function_sop"},
});
stack_traces["y"] = std::make_shared<TestStackTrace>(std::vector<StackFrame>{
{"dummy_file_alpha.cc", 15, "function_flex"},
{"dummy_file_alpha.cc", 20, "function_bar"},
{"dummy_file_beta.cc", 30, "function_sop"},
});
GraphDebugInfoBuilder builder;
builder.AccumulateStackTracesMap(stack_traces, "@func");
GraphDebugInfo debug_info = builder.Build();
EXPECT_THAT(debug_info.files(), UnorderedElementsAre("dummy_file_alpha.cc",
"dummy_file_beta.cc"));
EXPECT_THAT(debug_info.name_to_trace_id_size(), Eq(3));
EXPECT_THAT(debug_info.name_to_trace_id().find("scale@func"),
Ne(debug_info.name_to_trace_id().end()));
auto stack_trace = debug_info.traces_by_id().at(
debug_info.name_to_trace_id().at("scale@func"));
EXPECT_THAT(stack_trace.frame_id_size(), Eq(2));
std::vector<GraphDebugInfo::FileLineCol> file_line_cols;
for (auto& frame_id : stack_trace.frame_id()) {
file_line_cols.push_back(debug_info.frames_by_id().at(frame_id));
}
auto file_line_col_0 = file_line_cols[0];
auto file_line_col_1 = file_line_cols[1];
EXPECT_THAT(std::vector<int>(
{file_line_col_0.file_index(), file_line_col_1.file_index()}),
UnorderedElementsAre(0, 1));
EXPECT_THAT(file_line_col_0.line(), Eq(10));
EXPECT_THAT(file_line_col_0.func(), Eq("function_foo"));
EXPECT_THAT(file_line_col_1.line(), Eq(30));
EXPECT_THAT(file_line_col_1.func(), Eq("function_sop"));
}
TEST(GraphDebugInfoBuilderTest, AppendGraphDebugInfo) {
GraphDebugInfo a;
{
GraphDebugInfoBuilder builder;
StackTracesMap stack_traces;
stack_traces["two"] = std::make_shared<TestStackTrace>(
std::vector<StackFrame>{{"dummy_file_alpha.cc", 20, "function_bar"}});
stack_traces["scale"] = std::make_shared<TestStackTrace>(
std::vector<StackFrame>{{"dummy_file_alpha.cc", 10, "function_foo"}});
builder.AccumulateStackTracesMap(stack_traces, "");
a = builder.Build();
}
GraphDebugInfo b;
{
GraphDebugInfoBuilder builder;
StackTracesMap stack_traces;
stack_traces["y"] =
std::make_shared<TestStackTrace>(std::vector<StackFrame>{
{"dummy_file_alpha.cc", 15, "function_flex"},
});
builder.AccumulateStackTracesMap(stack_traces, "");
b = builder.Build();
}
GraphDebugInfo c;
{
GraphDebugInfoBuilder builder;
StackTracesMap stack_traces;
stack_traces["z"] =
std::make_shared<TestStackTrace>(std::vector<StackFrame>{
{"dummy_file_alpha.cc", 15, "function_flex"},
});
builder.AccumulateStackTracesMap(stack_traces, "@func3");
c = builder.Build();
}
GraphDebugInfoBuilder builder;
builder.AppendGraphDebugInfo("func1", a);
builder.AppendGraphDebugInfo("func2", b);
builder.AppendGraphDebugInfo("", c);
GraphDebugInfo combined = builder.Build();
EXPECT_EQ(combined.name_to_trace_id().size(), 4);
std::vector<std::string> keys{"two@func1", "scale@func1", "y@func2",
"z@func3"};
for (const auto& key : keys) {
EXPECT_THAT(combined.name_to_trace_id().find(key),
Ne(combined.name_to_trace_id().end()));
}
}
TEST(StackTracesMapToGraphDebugInfoTest, EmptyMap) {
StackTracesMap map;
GraphDebugInfo generated = StackTracesMapToGraphDebugInfo(map);
EXPECT_EQ(generated.files_size(), 0);
EXPECT_EQ(generated.traces_size(), 0);
}
TEST(StackTracesMapToGraphDebugInfoTest, EmptyFrames) {
StackTracesMap map;
std::vector<StackFrame> frames;
auto stack_trace = std::make_shared<FrozenStackTrace>(frames);
map.insert({"dummy_name", stack_trace});
GraphDebugInfo generated = StackTracesMapToGraphDebugInfo(map);
EXPECT_EQ(generated.files_size(), 0);
EXPECT_EQ(generated.traces_by_id_size(), 1);
EXPECT_TRUE(generated.name_to_trace_id().contains("dummy_name"));
}
TEST(StackTracesMapToGraphDebugInfoTest, RoundTripStackTraces) {
StackTracesMap map;
std::vector<StackFrame> frames = {
StackFrame({"dummy_file_name", 10, "dummy_function_name"}),
StackFrame({"dummy_file_name", 20, "other_function_name"})};
auto stack_trace = std::make_shared<FrozenStackTrace>(frames);
map.insert({"dummy_name", stack_trace});
GraphDebugInfo generated = StackTracesMapToGraphDebugInfo(map);
StackTracesMap output = LoadTracesFromDebugInfo(generated);
for (auto [name, trace] : output) {
auto orig_trace = map[name];
EXPECT_NE(orig_trace, nullptr);
EXPECT_EQ(orig_trace->ToFrames(), trace->ToFrames());
}
}
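// Illustrative sketch, not part of the original test file: the same round trip
// as above, but through the serialized-string entry points
// (ToGraphDebugInfoStr / LoadTracesFromDebugInfoStr); frame values are
// placeholders.
TEST(GraphDebugInfoBuilderTest, SerializedStringRoundTrip) {
  StackTracesMap map;
  std::vector<StackFrame> frames = {
      StackFrame({"dummy_file_name", 10, "dummy_function_name"})};
  map.insert({"dummy_name", std::make_shared<FrozenStackTrace>(frames)});
  GraphDebugInfoBuilder builder;
  builder.AccumulateStackTracesMap(map, "");
  std::string serialized = builder.ToGraphDebugInfoStr();
  absl::StatusOr<StackTracesMap> restored =
      LoadTracesFromDebugInfoStr(serialized);
  ASSERT_TRUE(restored.ok());
  EXPECT_TRUE(restored->contains("dummy_name"));
}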
}
} |
1,441 | cpp | tensorflow/tensorflow | edgeset | tensorflow/core/graph/edgeset.cc | tensorflow/core/graph/edgeset_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_EDGESET_H_
#define TENSORFLOW_CORE_GRAPH_EDGESET_H_
#include <stddef.h>
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
class Edge;
class EdgeSet {
public:
EdgeSet();
~EdgeSet();
typedef const Edge* key_type;
typedef const Edge* value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
class const_iterator;
typedef const_iterator iterator;
bool empty() const;
size_type size() const;
void clear();
std::pair<iterator, bool> insert(value_type value);
size_type erase(key_type key);
void reserve(size_type new_size) {
if (new_size > kInline) {
auto s = new gtl::FlatSet<const Edge*>(new_size);
s->insert(reinterpret_cast<const Edge**>(std::begin(ptrs_)),
reinterpret_cast<const Edge**>(&ptrs_[0] + size()));
ptrs_[0] = this;
ptrs_[1] = s;
}
}
const_iterator begin() const;
const_iterator end() const;
private:
static constexpr int kInline = 64 / sizeof(const void*);
const void* ptrs_[kInline];
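  // Representation note (inferred from get_set() below): up to kInline edge
  // pointers are stored directly in ptrs_; once that capacity is exceeded,
  // ptrs_[0] is set to `this` as an overflow marker and ptrs_[1] points to a
  // heap-allocated gtl::FlatSet that then holds every edge.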
gtl::FlatSet<const Edge*>* get_set() const {
if (ptrs_[0] == this) {
return static_cast<gtl::FlatSet<const Edge*>*>(
const_cast<void*>(ptrs_[1]));
} else {
return nullptr;
}
}
#ifdef NDEBUG
void RegisterMutation() {}
#else
uint32 mutations_ = 0;
void RegisterMutation() { mutations_++; }
#endif
EdgeSet(const EdgeSet&) = delete;
void operator=(const EdgeSet&) = delete;
};
class EdgeSet::const_iterator {
public:
typedef typename EdgeSet::value_type value_type;
typedef const typename EdgeSet::value_type& reference;
typedef const typename EdgeSet::value_type* pointer;
typedef typename EdgeSet::difference_type difference_type;
typedef std::forward_iterator_tag iterator_category;
const_iterator() {}
const_iterator& operator++();
  const_iterator operator++(int);
const value_type* operator->() const;
value_type operator*() const;
bool operator==(const const_iterator& other) const;
bool operator!=(const const_iterator& other) const {
return !(*this == other);
}
private:
friend class EdgeSet;
void const* const* array_iter_ = nullptr;
typename gtl::FlatSet<const Edge*>::const_iterator tree_iter_;
#ifdef NDEBUG
inline void Init(const EdgeSet* e) {}
inline void CheckNoMutations() const {}
#else
inline void Init(const EdgeSet* e) {
owner_ = e;
init_mutations_ = e->mutations_;
}
inline void CheckNoMutations() const {
CHECK_EQ(init_mutations_, owner_->mutations_);
}
const EdgeSet* owner_ = nullptr;
uint32 init_mutations_ = 0;
#endif
};
inline EdgeSet::EdgeSet() {
for (int i = 0; i < kInline; i++) {
ptrs_[i] = nullptr;
}
}
inline EdgeSet::~EdgeSet() { delete get_set(); }
inline bool EdgeSet::empty() const { return size() == 0; }
inline EdgeSet::size_type EdgeSet::size() const {
auto s = get_set();
if (s) {
return s->size();
} else {
size_t result = 0;
for (int i = 0; i < kInline; i++) {
if (ptrs_[i]) result++;
}
return result;
}
}
inline void EdgeSet::clear() {
RegisterMutation();
delete get_set();
for (int i = 0; i < kInline; i++) {
ptrs_[i] = nullptr;
}
}
inline EdgeSet::const_iterator EdgeSet::begin() const {
const_iterator ci;
ci.Init(this);
auto s = get_set();
if (s) {
ci.tree_iter_ = s->begin();
} else {
ci.array_iter_ = &ptrs_[0];
}
return ci;
}
inline EdgeSet::const_iterator EdgeSet::end() const {
const_iterator ci;
ci.Init(this);
auto s = get_set();
if (s) {
ci.tree_iter_ = s->end();
} else {
ci.array_iter_ = &ptrs_[size()];
}
return ci;
}
inline EdgeSet::const_iterator& EdgeSet::const_iterator::operator++() {
CheckNoMutations();
if (array_iter_ != nullptr) {
++array_iter_;
} else {
++tree_iter_;
}
return *this;
}
inline EdgeSet::const_iterator EdgeSet::const_iterator::operator++(int) {
CheckNoMutations();
const_iterator tmp = *this;
operator++();
return tmp;
}
inline const EdgeSet::const_iterator::value_type* EdgeSet::const_iterator::
operator->() const {
CheckNoMutations();
if (array_iter_ != nullptr) {
return reinterpret_cast<const value_type*>(array_iter_);
} else {
return tree_iter_.operator->();
}
}
inline EdgeSet::const_iterator::value_type EdgeSet::const_iterator::operator*()
const {
CheckNoMutations();
if (array_iter_ != nullptr) {
return static_cast<value_type>(*array_iter_);
} else {
return *tree_iter_;
}
}
inline bool EdgeSet::const_iterator::operator==(
const const_iterator& other) const {
DCHECK((array_iter_ == nullptr) == (other.array_iter_ == nullptr))
<< "Iterators being compared must be from same set that has not "
<< "been modified since the iterator was constructed";
CheckNoMutations();
if (array_iter_ != nullptr) {
return array_iter_ == other.array_iter_;
} else {
return other.array_iter_ == nullptr && tree_iter_ == other.tree_iter_;
}
}
}
#endif
#include "tensorflow/core/graph/edgeset.h"
namespace tensorflow {
std::pair<EdgeSet::const_iterator, bool> EdgeSet::insert(value_type value) {
RegisterMutation();
const_iterator ci;
ci.Init(this);
auto s = get_set();
if (!s) {
for (int i = 0; i < kInline; i++) {
if (ptrs_[i] == value) {
ci.array_iter_ = &ptrs_[i];
return std::make_pair(ci, false);
}
}
for (int i = 0; i < kInline; i++) {
if (ptrs_[i] == nullptr) {
ptrs_[i] = value;
ci.array_iter_ = &ptrs_[i];
return std::make_pair(ci, true);
}
}
s = new gtl::FlatSet<const Edge*>;
s->insert(reinterpret_cast<const Edge**>(std::begin(ptrs_)),
reinterpret_cast<const Edge**>(std::end(ptrs_)));
ptrs_[0] = this;
ptrs_[1] = s;
}
auto p = s->insert(value);
ci.tree_iter_ = p.first;
return std::make_pair(ci, p.second);
}
EdgeSet::size_type EdgeSet::erase(key_type key) {
RegisterMutation();
auto s = get_set();
if (!s) {
for (int i = 0; i < kInline; i++) {
if (ptrs_[i] == key) {
size_t n = size();
ptrs_[i] = ptrs_[n - 1];
ptrs_[n - 1] = nullptr;
return 1;
}
}
return 0;
} else {
return s->erase(key);
}
}
} | #include "tensorflow/core/graph/edgeset.h"
#include <set>
#include <vector>
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class EdgeSetTest : public ::testing::Test {
public:
EdgeSetTest() : edges_(nullptr) {}
~EdgeSetTest() override { delete[] edges_; }
void MakeEdgeSet(int n) {
if (edges_) {
delete[] edges_;
}
edges_ = new Edge[n];
eset_.clear();
model_.clear();
for (int i = 0; i < n; i++) {
eset_.insert(&edges_[i]);
model_.insert(&edges_[i]);
}
}
void CheckSame() {
EXPECT_EQ(model_.size(), eset_.size());
EXPECT_EQ(model_.empty(), eset_.empty());
std::vector<const Edge*> modelv(model_.begin(), model_.end());
std::vector<const Edge*> esetv(eset_.begin(), eset_.end());
std::sort(modelv.begin(), modelv.end());
std::sort(esetv.begin(), esetv.end());
EXPECT_EQ(modelv.size(), esetv.size());
for (size_t i = 0; i < modelv.size(); i++) {
EXPECT_EQ(modelv[i], esetv[i]) << i;
}
}
static constexpr int kInline = 64 / sizeof(const void*);
Edge nonexistent_;
Edge* edges_;
EdgeSet eset_;
std::set<const Edge*> model_;
};
namespace {
TEST_F(EdgeSetTest, Ops) {
for (int n : {0, 1, 2, kInline + 1}) {
MakeEdgeSet(n);
CheckSame();
EXPECT_EQ((n == 0), eset_.empty());
EXPECT_EQ(n, eset_.size());
eset_.clear();
model_.clear();
CheckSame();
eset_.insert(&edges_[0]);
model_.insert(&edges_[0]);
CheckSame();
}
}
TEST_F(EdgeSetTest, Exists) {
for (int n : {0, 1, 2, kInline + 1}) {
MakeEdgeSet(n);
for (int pos = 0; pos < n; pos++) {
auto p = eset_.insert(&edges_[pos]);
EXPECT_FALSE(p.second);
EXPECT_EQ(&edges_[pos], *p.first);
EXPECT_EQ(1, eset_.erase(&edges_[pos]));
model_.erase(&edges_[pos]);
CheckSame();
}
}
}
TEST_F(EdgeSetTest, DoesNotExist) {
for (int n : {0, 1, 2, kInline + 1}) {
MakeEdgeSet(n);
EXPECT_EQ(0, eset_.erase(&nonexistent_));
auto p = eset_.insert(&nonexistent_);
EXPECT_TRUE(p.second);
EXPECT_EQ(&nonexistent_, *p.first);
}
}
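// Illustrative sketch, not part of the original test file: exercises the
// transition from the inline pointer array to the FlatSet representation,
// reusing only fixture members defined above.
TEST_F(EdgeSetTest, GrowsPastInlineCapacity) {
  MakeEdgeSet(kInline);  // fills exactly the inline capacity
  CheckSame();
  // One more distinct edge forces EdgeSet::insert to spill into a FlatSet.
  auto p = eset_.insert(&nonexistent_);
  EXPECT_TRUE(p.second);
  model_.insert(&nonexistent_);
  CheckSame();
  EXPECT_EQ(kInline + 1, eset_.size());
}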
}
} |
1,442 | cpp | tensorflow/tensorflow | simple_delete | tensorflow/core/graph/regularization/simple_delete.cc | tensorflow/core/graph/regularization/simple_delete_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_REGULARIZATION_SIMPLE_DELETE_H_
#define TENSORFLOW_CORE_GRAPH_REGULARIZATION_SIMPLE_DELETE_H_
#include "tensorflow/core/framework/graph.pb.h"
namespace tensorflow::graph_regularization {
void SimpleDelete(GraphDef& graph_def);
}
#endif
#include "tensorflow/core/graph/regularization/simple_delete.h"
#include <cstdint>
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/regularization/util.h"
#include "tensorflow/core/grappler/op_types.h"
namespace tensorflow::graph_regularization {
namespace {
void RegularizeNodes(GraphDef* graph_def) {
for (NodeDef& node : *graph_def->mutable_node()) {
if (grappler::IsPartitionedCall(node) ||
grappler::IsStatefulPartitionedCall(node)) {
std::string function_name = node.attr().find("f")->second.func().name();
absl::StatusOr<int64_t> uid = GetSuffixUID(function_name);
if (uid.ok()) {
node.mutable_attr()->find("f")->second.mutable_func()->set_name(
std::string(
absl::StripSuffix(function_name, std::to_string(*uid))));
}
auto node_config_proto = node.mutable_attr()->find("config_proto");
if (node_config_proto != node.attr().end()) {
node_config_proto->second.mutable_s()->erase();
}
}
if (grappler::IsConstant(node)) {
if (node.attr().at("dtype").type() == DT_STRING) {
node.mutable_attr()->find("value")->second.clear_value();
}
}
}
}
}
void SimpleDelete(GraphDef& graph_def) {
RegularizeNodes(&graph_def);
graph_def.mutable_library()->Clear();
graph_def.mutable_versions()->Clear();
}
} | #include "tensorflow/core/graph/regularization/simple_delete.h"
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/graph/regularization/util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::graph_regularization {
namespace {
absl::StatusOr<SavedModel> ReadSavedModel(absl::string_view file_dir) {
std::string file_path = io::JoinPath(file_dir, "saved_model.pb");
std::string serialized_saved_model;
auto status =
ReadFileToString(Env::Default(), file_path, &serialized_saved_model);
if (!status.ok()) {
return status;
}
SavedModel saved_model_pb;
saved_model_pb.ParseFromString(serialized_saved_model);
return saved_model_pb;
}
TEST(SimpleDeleteTest, TestSimpleDeleteModelSavedTwice) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/graph/regularization/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
MetaGraphDef* metagraph = saved_model_pb.mutable_meta_graphs(0);
GraphDef* graph_def = metagraph->mutable_graph_def();
SimpleDelete(*graph_def);
uint64 hash1 = ComputeHash(*graph_def);
const std::string export_dir2 =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/graph/regularization/testdata", "bert2");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb2,
ReadSavedModel(export_dir2));
const MetaGraphDef& metagraph2 = saved_model_pb2.meta_graphs(0);
GraphDef graph_def2 = metagraph2.graph_def();
SimpleDelete(graph_def2);
uint64 hash2 = ComputeHash(graph_def2);
EXPECT_EQ(hash1, hash2);
}
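// Illustrative sketch, not part of the original test file: a self-contained
// check of what SimpleDelete does to a single PartitionedCall node. The
// function-name suffix "_1234" and the config_proto bytes are placeholders;
// the expected suffix stripping relies on GetSuffixUID from util.h.
TEST(SimpleDeleteTest, StripsCallUidAndConfigProto) {
  GraphDef graph_def;
  NodeDef* node = graph_def.add_node();
  node->set_op("PartitionedCall");
  (*node->mutable_attr())["f"].mutable_func()->set_name("__inference_fn_1234");
  (*node->mutable_attr())["config_proto"].set_s("non-deterministic bytes");
  SimpleDelete(graph_def);
  EXPECT_EQ(graph_def.node(0).attr().at("f").func().name(), "__inference_fn_");
  EXPECT_TRUE(graph_def.node(0).attr().at("config_proto").s().empty());
}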
}
} |
1,443 | cpp | tensorflow/tensorflow | nccl_manager | tensorflow/core/nccl/nccl_manager.cc | tensorflow/core/nccl/nccl_manager_test.cc | #ifndef TENSORFLOW_CORE_NCCL_NCCL_MANAGER_H_
#define TENSORFLOW_CORE_NCCL_NCCL_MANAGER_H_
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include <vector>
#ifndef gpu_assert
#define gpu_assert(x)
#endif
#include "absl/container/flat_hash_map.h"
#if GOOGLE_CUDA
#include "third_party/nccl/nccl.h"
#elif TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#if (TF_ROCM_VERSION >= 50200)
#include "rocm/include/rccl/rccl.h"
#else
#include "rocm/include/rccl.h"
#endif
#include "tensorflow/core/common_runtime/gpu_device_context.h"
#endif
#include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/stream_executor.h"
namespace tensorflow {
class NcclManager {
public:
typedef std::function<void(Status)> DoneCallback;
NcclManager();
~NcclManager();
static NcclManager* instance();
#if TENSORFLOW_USE_ROCM
static int instance_count;
#endif
string GenerateCommunicatorKey();
struct Participant {
Participant(se::StreamExecutor* executor, se::Stream* tensor_stream,
const DeviceBase::AcceleratorDeviceInfo* info,
const Tensor* input, Tensor* output, int global_rank,
DoneCallback done_callback)
: executor(executor),
tensor_stream(tensor_stream),
event_mgr(info->event_mgr),
gpu_device_id(info->gpu_id),
#if TENSORFLOW_USE_ROCM
context(static_cast<GPUDeviceContext*>(info->default_context)),
#endif
input(input),
output(output),
global_rank(global_rank),
done_callback(std::move(done_callback)),
root(false) {
DCHECK(executor != nullptr);
DCHECK(event_mgr != nullptr);
DCHECK(tensor_stream != nullptr);
}
se::StreamExecutor* const executor = nullptr;
se::Stream* const tensor_stream;
EventMgr* const event_mgr;
const int gpu_device_id;
#if TENSORFLOW_USE_ROCM
GPUDeviceContext* const context;
#endif
const Tensor* input;
Tensor* output;
const int global_rank;
DoneCallback done_callback;
bool root;
};
struct Context {
Context(const string& collective_key, int num_local_devices,
int num_global_devices, const string& communicator_key,
int source_rank)
: collective_key(collective_key),
num_local_devices(num_local_devices),
num_global_devices(num_global_devices),
communicator_key(communicator_key),
source_rank(source_rank) {}
const string& collective_key;
int num_local_devices;
int num_global_devices;
const string& communicator_key;
int source_rank;
};
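  // Illustrative usage (hedged sketch, not part of the original header; the
  // identifiers below are placeholders): callers build one Participant per
  // local GPU and pass the same Context to each, e.g.
  //
  //   auto participant = std::make_unique<NcclManager::Participant>(
  //       executor, tensor_stream, device_info, &input, &output,
  //       /*global_rank=*/-1, done_callback);
  //   NcclManager::instance()->AddToAllReduce(
  //       std::move(participant),
  //       {"allreduce_key", num_local_devices, num_global_devices,
  //        /*communicator_key=*/"", /*source_rank=*/-1},
  //       ncclSum);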
void AddToAllReduce(std::unique_ptr<Participant> participant,
const Context& context, ncclRedOp_t reduction_op);
void AddToAllGather(std::unique_ptr<Participant> participant,
const Context& context);
void AddToReduceScatter(std::unique_ptr<Participant> participant,
const Context& context, ncclRedOp_t reduction_op);
void AddBroadcastSend(std::unique_ptr<Participant> participant,
const Context& context);
void AddBroadcastRecv(std::unique_ptr<Participant> participant,
const Context& context);
void AddReduceSend(std::unique_ptr<Participant> participant,
const Context& context, ncclRedOp_t reduction_op);
void AddReduceRecv(std::unique_ptr<Participant> participant,
const Context& context, ncclRedOp_t reduction_op);
void AddToAllToAll(std::unique_ptr<Participant> participant,
const Context& context);
void SignalMultiNodeReady(const string& collective_key);
void StartAbort(const Status& s);
void Reset();
private:
enum CollectiveType {
kAllReduce = 1,
kBroadcast = 2,
kReduce = 3,
kAllGather = 4,
kReduceScatter = 5,
kAllToAll = 6,
};
struct Collective;
struct Communicator;
struct CommunicatorMember;
struct NcclStream;
Status GetCommunicator(Collective* collective, Communicator** communicator);
void AddParticipant(std::unique_ptr<Participant> participant,
const Context& context, CollectiveType collective_type,
ncclRedOp_t reduction_op);
bool CheckReady(const string& collective_key, Collective* collective)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void RunCollective(Collective* collective);
void LoopKernelLaunches(NcclStream* stream);
mutex mu_;
absl::flat_hash_map<string, Collective*> collectives_ TF_GUARDED_BY(mu_);
absl::flat_hash_map<se::StreamExecutor*, std::vector<NcclStream*>>
device_to_comm_streams_ TF_GUARDED_BY(mu_);
std::vector<std::unique_ptr<Communicator>> communicators_ TF_GUARDED_BY(mu_);
Status status_ TF_GUARDED_BY(mu_);
NcclManager(const NcclManager&) = delete;
void operator=(const NcclManager&) = delete;
};
}
#endif
#endif
#include "tensorflow/core/nccl/nccl_manager.h"
#include <utility>
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "absl/base/call_once.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/profiler/lib/annotated_traceme.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#if GOOGLE_CUDA
#include "xla/stream_executor/cuda/cuda_activation.h"
#elif TENSORFLOW_USE_ROCM
#include "tensorflow/core/platform/rocm.h"
#endif
namespace tensorflow {
#if GOOGLE_CUDA
using se::cuda::ScopedActivateExecutorContext;
#elif TENSORFLOW_USE_ROCM
using se::rocm::ScopedActivateExecutorContext;
#define cudaError_t hipError_t
#define cudaStream_t hipStream_t
#define cudaGetErrorString hipGetErrorString
#define cudaGetDevice hipGetDevice
#define cudaSetDevice hipSetDevice
#define cudaSuccess hipSuccess
int NcclManager::instance_count = 0;
#endif
#define NCCL_RETURN_IF_ERROR(...) \
do { \
ncclResult_t nccl_status = (__VA_ARGS__); \
if (nccl_status != ncclSuccess) { \
return errors::Internal("NCCL: ", ncclGetErrorString(nccl_status), \
". Set NCCL_DEBUG=WARN for detail."); \
} \
} while (0)
#define CUDA_RETURN_IF_ERROR(...) \
do { \
cudaError_t cuda_status = (__VA_ARGS__); \
if (cuda_status != cudaSuccess) { \
return errors::Internal("CUDA: ", cudaGetErrorString(cuda_status)); \
} \
} while (0)
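// Each NcclStream owns the GPU stream on which NCCL kernels are enqueued; a
// background closure (LoopKernelLaunches, scheduled in GetCommunicator) drains
// its pending_launches_ queue until shutdown_requested is set.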
struct NcclManager::NcclStream : public core::RefCounted {
public:
NcclStream() = default;
~NcclStream() = default;
se::StreamExecutor* executor = nullptr;
#if TENSORFLOW_USE_ROCM
se::Stream* stream = nullptr;
#else
std::unique_ptr<se::Stream> stream;
#endif
mutex mu;
condition_variable cv;
std::deque<std::pair<Collective*, int>> pending_launches_ TF_GUARDED_BY(mu);
bool shutdown_requested TF_GUARDED_BY(mu) = false;
};
struct NcclManager::CommunicatorMember {
public:
CommunicatorMember() {}
~CommunicatorMember() {
if (nccl_comm != nullptr) ncclCommDestroy(nccl_comm);
}
ncclComm_t nccl_comm = nullptr;
NcclStream* nccl_stream = nullptr;
};
struct NcclManager::Communicator {
public:
explicit Communicator(std::vector<CommunicatorMember> members,
const string& key)
: num_devices(members.size()), members(std::move(members)), key(key) {}
const int num_devices;
std::vector<CommunicatorMember> members;
const string key;
};
namespace {
static constexpr DataTypeSet kValidDataTypes =
ToSet(DT_HALF) | ToSet(DT_FLOAT) | ToSet(DT_DOUBLE) | ToSet(DT_INT32) |
ToSet(DT_INT64);
ncclDataType_t ToNcclType(DataType t) {
switch (t) {
case DT_HALF:
return ncclHalf;
case DT_FLOAT:
return ncclFloat;
case DT_DOUBLE:
return ncclDouble;
case DT_INT32:
return ncclInt;
case DT_INT64:
return ncclInt64;
default:
return ncclFloat;
}
}
void StringToNcclUniqueId(const string& str_id, ncclUniqueId* nccl_id) {
if (str_id.size() == NCCL_UNIQUE_ID_BYTES) {
memcpy(nccl_id->internal, str_id.data(), NCCL_UNIQUE_ID_BYTES);
}
}
}
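// One Collective is created per collective_key; it accumulates Participants
// until the collective is ready (all local participants added and, for
// multi-node runs, SignalMultiNodeReady has been called), after which
// RunCollective launches the NCCL kernels on the participating streams.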
struct NcclManager::Collective : public core::RefCounted {
Collective(const string& collective_key_in, DataType data_type_in,
CollectiveType type_in, ncclRedOp_t reduction_op_in,
int num_local_devices_in, int num_global_devices_in,
const string& communicator_key_in)
: collective_key(collective_key_in),
data_type(data_type_in),
type(type_in),
reduction_op(reduction_op_in),
num_local_devices(num_local_devices_in),
num_global_devices(num_global_devices_in),
single_node(num_local_devices_in == num_global_devices_in),
communicator_key(communicator_key_in) {
participants.reserve(num_local_devices_in);
#if TENSORFLOW_USE_ROCM
if (NcclManager::instance_count > 1) {
status = errors::Internal(
"ROCm cannot use multi-node NCCL collectives on a single node");
}
#endif
}
const string collective_key;
const DataType data_type;
const CollectiveType type;
const ncclRedOp_t reduction_op;
const int num_local_devices;
const int num_global_devices;
const bool single_node;
const string communicator_key;
Communicator* communicator = nullptr;
std::vector<std::unique_ptr<Participant>> participants;
int root_rank = -1;
int available_participants = 0;
bool multi_node_ready = false;
uint64 trace_context = 0;
Status status;
};
NcclManager::NcclManager() {
VLOG(2) << "New NcclManager " << this;
#if TENSORFLOW_USE_ROCM
++instance_count;
#endif
}
NcclManager::~NcclManager() {
VLOG(2) << "~NcclManager " << this;
#if TENSORFLOW_USE_ROCM
--instance_count;
#endif
for (auto& it : device_to_comm_streams_) {
for (NcclStream* nccl_stream : it.second) {
{
mutex_lock l(nccl_stream->mu);
nccl_stream->shutdown_requested = true;
nccl_stream->cv.notify_all();
}
nccl_stream->Unref();
}
}
}
NcclManager* NcclManager::instance() {
static NcclManager* instance = new NcclManager();
#if TENSORFLOW_USE_ROCM
static absl::once_flag once;
absl::call_once(once, [] { --NcclManager::instance_count; });
#endif
return instance;
}
string NcclManager::GenerateCommunicatorKey() {
ncclUniqueId nccl_id;
ncclGetUniqueId(&nccl_id);
return string(nccl_id.internal, NCCL_UNIQUE_ID_BYTES);
}
Status NcclManager::GetCommunicator(NcclManager::Collective* collective,
NcclManager::Communicator** communicator) {
std::sort(collective->participants.begin(), collective->participants.end(),
[](const std::unique_ptr<Participant>& a,
const std::unique_ptr<Participant>& b) {
if (a->gpu_device_id != b->gpu_device_id) {
return a->gpu_device_id < b->gpu_device_id;
}
if (a->executor != b->executor) {
return a->executor < b->executor;
}
return a->global_rank < b->global_rank;
});
mutex_lock l(mu_);
if (!status_.ok()) {
return status_;
}
if (collective->communicator_key.empty()) {
for (auto& comm : communicators_) {
if (comm->num_devices == collective->num_global_devices) {
int i;
for (i = 0; i < collective->num_local_devices; ++i) {
if (comm->members[i].nccl_stream->executor !=
collective->participants[i]->executor) {
break;
}
}
if (i == collective->num_local_devices) {
*communicator = comm.get();
return OkStatus();
}
}
}
} else {
#if NCCL_MAJOR < 2
return errors::Internal(
"Cannot use multi-node NCCL collectives with NCCL 1.x");
#endif
if (collective->communicator_key.size() != NCCL_UNIQUE_ID_BYTES) {
return errors::Internal("Expected communicator_key of size ",
NCCL_UNIQUE_ID_BYTES, " but found size ",
collective->communicator_key.size());
}
for (auto& comm : communicators_) {
if (comm->key == collective->communicator_key) {
*communicator = comm.get();
return OkStatus();
}
}
}
auto* env = Env::Default();
std::set<NcclStream*> used_streams;
std::vector<CommunicatorMember> members(collective->num_local_devices);
std::vector<int> devices(collective->num_local_devices);
for (int i = 0; i < collective->num_local_devices; ++i) {
auto* executor = collective->participants[i]->executor;
auto& streams = device_to_comm_streams_[executor];
NcclStream* nccl_stream = nullptr;
for (const auto& s : streams) {
if (used_streams.insert(s).second) {
nccl_stream = s;
break;
}
}
if (nccl_stream == nullptr) {
nccl_stream = new NcclStream();
nccl_stream->executor = executor;
#if TENSORFLOW_USE_ROCM
nccl_stream->stream = collective->participants[i]->context->nccl_stream();
#else
TF_ASSIGN_OR_RETURN(auto stream, executor->CreateStream());
nccl_stream->stream = std::move(stream);
#endif
streams.emplace_back(nccl_stream);
used_streams.insert(nccl_stream);
nccl_stream->Ref();
env->SchedClosure([this, nccl_stream]() {
LoopKernelLaunches(nccl_stream);
nccl_stream->Unref();
});
}
members[i].nccl_stream = nccl_stream;
devices[i] = collective->participants[i]->gpu_device_id;
}
std::vector<ncclComm_t> nccl_comms(collective->num_local_devices);
VLOG(2) << "Created nccl Communicator with "
<< "num_global_devices = " << collective->num_global_devices
<< " num_local_devices = " << collective->num_local_devices
<< " communicator_key ="
<< absl::StrJoin(
std::vector<int>{collective->communicator_key.begin(),
collective->communicator_key.end()},
" ");
#if NCCL_MAJOR >= 2
ncclUniqueId nccl_id;
if (collective->single_node) {
NCCL_RETURN_IF_ERROR(ncclGetUniqueId(&nccl_id));
} else {
StringToNcclUniqueId(collective->communicator_key, &nccl_id);
}
int saved_device = 0;
CUDA_RETURN_IF_ERROR(cudaGetDevice(&saved_device));
NCCL_RETURN_IF_ERROR(ncclGroupStart());
for (int i = 0; i < collective->num_local_devices; ++i) {
const int rank = collective->participants[i]->global_rank >= 0
? collective->participants[i]->global_rank
: i;
CUDA_RETURN_IF_ERROR(cudaSetDevice(devices[i]));
NCCL_RETURN_IF_ERROR(ncclCommInitRank(
nccl_comms.data() + i, collective->num_global_devices, nccl_id, rank));
}
NCCL_RETURN_IF_ERROR(ncclGroupEnd());
CUDA_RETURN_IF_ERROR(cudaSetDevice(saved_device));
#else
NCCL_RETURN_IF_ERROR(ncclCommInitAll(
nccl_comms.data(), collective->num_local_devices, devices.data()));
#endif
for (int i = 0; i < collective->num_local_devices; ++i) {
members[i].nccl_comm = nccl_comms[i];
}
communicators_.emplace_back(
new Communicator(std::move(members), collective->communicator_key));
*communicator = communicators_.back().get();
return OkStatus();
}
void NcclManager::AddToAllReduce(std::unique_ptr<Participant> participant,
const Context& context,
ncclRedOp_t reduction_op) {
AddParticipant(std::move(participant), context, kAllReduce, reduction_op);
}
void NcclManager::AddToAllGather(std::unique_ptr<Participant> participant,
const Context& context) {
AddParticipant(std::move(participant), context, kAllGather,
                 ncclSum /* unused for all-gather */);
}
void NcclManager::AddToReduceScatter(std::unique_ptr<Participant> participant,
const Context& context,
ncclRedOp_t reduction_op) {
AddParticipant(std::move(participant), context, kReduceScatter, reduction_op);
}
void NcclManager::AddToAllToAll(std::unique_ptr<Participant> participant,
const Context& context) {
AddParticipant(std::move(participant), context, kAllToAll,
                 ncclSum /* unused for all-to-all */);
}
void NcclManager::AddBroadcastSend(std::unique_ptr<Participant> participant,
const Context& context) {
participant->root = true;
AddParticipant(std::move(participant), context, kBroadcast,
                 ncclSum /* unused for broadcast */);
}
void NcclManager::AddBroadcastRecv(std::unique_ptr<Participant> participant,
const Context& context) {
AddParticipant(std::move(participant), context, kBroadcast,
                 ncclSum /* unused for broadcast */);
}
void NcclManager::AddReduceSend(std::unique_ptr<Participant> participant,
const Context& context,
ncclRedOp_t reduction_op) {
AddParticipant(std::move(participant), context, kReduce, reduction_op);
}
void NcclManager::AddReduceRecv(std::unique_ptr<Participant> participant,
const Context& context,
ncclRedOp_t reduction_op) {
participant->root = true;
AddParticipant(std::move(participant), context, kReduce, reduction_op);
}
void NcclManager::SignalMultiNodeReady(const string& collective_key) {
Collective* to_run = nullptr;
{
mutex_lock l(mu_);
auto collective_it = collectives_.find(collective_key);
if (collective_it != collectives_.end()) {
Collective* collective = collective_it->second;
collective->multi_node_ready = true;
if (CheckReady(collective_key, collective)) {
to_run = collective;
}
VLOG(2) << "SignalMultiNodeReady collective " << collective_key
<< " to_run " << to_run;
}
}
if (to_run != nullptr) RunCollective(to_run);
}
void NcclManager::AddParticipant(std::unique_ptr<Participant> participant,
const Context& context,
CollectiveType collective_type,
ncclRedOp_t reduction_op) {
Collective* to_run = nullptr;
DataType data_type;
Status nccl_manager_status;
if (participant->input != nullptr) {
data_type = participant->input->dtype();
} else {
data_type = participant->output->dtype();
}
{
mutex_lock l(mu_);
nccl_manager_status = status_;
if (nccl_manager_status.ok()) {
auto collective_it = collectives_.find(context.collective_key);
Collective* collective = nullptr;
if (collective_it == collectives_.end()) {
collective = new Collective(
context.collective_key, data_type, collective_type, reduction_op,
context.num_local_devices, context.num_global_devices,
context.communicator_key);
collectives_.emplace(context.collective_key, collective);
} else {
collective = collective_it->second;
}
if (collective->status.ok() && !collective->single_node &&
collective->communicator_key.empty()) {
collective->status = errors::Internal(
"Collective ", reduction_op,
" is multi node with num_local_devices=",
collective->num_local_devices,
" and num_global_devices=", collective->num_global_devices,
" but has an empty communicator_key");
}
if (collective->status.ok() && collective->communicator_key.size() !=
context.communicator_key.size()) {
collective->status =
errors::Internal("Collective ", reduction_op,
" mismatch in member communicator_key with size ",
collective->communicator_key.size(),
" and arg communicator_key with size ",
context.communicator_key.size());
}
if (collective->status.ok() && collective->type != collective_type) {
collective->status = errors::Internal(
"Collective ", reduction_op, " previously initialized with type ",
collective->type, " but now got type ", collective_type);
}
if (collective->status.ok() &&
collective->num_global_devices != context.num_global_devices) {
collective->status =
errors::Internal("Collective ", reduction_op, | #include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include <algorithm>
#include <random>
#include <vector>
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/gpu/gpu_device.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/nccl/nccl_manager.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
namespace tensorflow {
static std::vector<std::unique_ptr<BaseGPUDevice>> GetGPUDevices() {
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory(DEVICE_GPU)
->AddDevices(SessionOptions(), "", &devices));
std::vector<std::unique_ptr<BaseGPUDevice>> gpus;
for (std::unique_ptr<Device>& device : devices) {
if (device->device_type() == "GPU") {
gpus.emplace_back(static_cast<BaseGPUDevice*>(device.release()));
}
}
return gpus;
}
template <typename Scalar>
class NcclManagerTest : public ::testing::Test {
public:
struct TestCase {
TestCase(int num_nodes, int num_ranks_per_node)
: num_nodes(num_nodes), num_ranks_per_node(num_ranks_per_node) {}
std::vector<Tensor> ins;
std::vector<Tensor> outs;
Tensor expected;
const int num_nodes;
const int num_ranks_per_node;
mutex mu;
Status final_status;
int num_completed TF_GUARDED_BY(mu) = 0;
condition_variable done_cv;
};
static void SetUpTestSuite() {
    setenv("NCCL_DEBUG", "INFO", 1 /* overwrite */);
    setenv("NCCL_LAUNCH_MODE", "PARALLEL", 1 /* overwrite */);
devices_ = new std::vector<std::unique_ptr<BaseGPUDevice>>(GetGPUDevices());
VLOG(1) << "Running test with " << devices_->size() << " gpus";
if (devices_->size() <= 1) {
LOG(FATAL) << "Cannot run NCCL test without multiple GPUs";
}
work_queue_ = new UnboundedWorkQueue(Env::Default(), "nccl_manager_test");
}
void SetUp() override {
ASSERT_GT(devices_->size(), 0) << "No GPUs found";
ASSERT_NE(work_queue_, nullptr);
}
static int32 NumGPUs() { return static_cast<int32>(devices_->size()); }
static void PopulateMultiNodeParams(int* num_nodes, int* num_ranks_per_node) {
const auto num_gpus = NumGPUs();
CHECK_GT(num_gpus, 1);
*num_nodes = 2;
if (num_gpus % 2 == 0) {
*num_ranks_per_node = num_gpus / 2;
} else {
*num_ranks_per_node = (num_gpus - 1) / 2;
}
}
static void TearDownTestSuite() {
delete devices_;
delete work_queue_;
}
TestCase* MakeReductionTestCase(int num_nodes, int num_ranks_per_node,
ncclRedOp_t reduction_op, TensorShape shape,
float value_offset) {
TestCase* test_case = new TestCase(num_nodes, num_ranks_per_node);
test_case->expected = Tensor(data_type_, shape);
if (reduction_op == ncclProd) {
test::FillFn<Scalar>(&test_case->expected,
[](int) { return static_cast<Scalar>(1); });
} else if (reduction_op == ncclSum) {
test::FillFn<Scalar>(&test_case->expected,
[](int) { return static_cast<Scalar>(0); });
} else if (reduction_op == ncclMax) {
test::FillFn<Scalar>(&test_case->expected, [](int) { return -max_; });
} else if (reduction_op == ncclMin) {
test::FillFn<Scalar>(&test_case->expected, [](int) { return max_; });
} else {
LOG(FATAL) << "Invalid reduction_op " << reduction_op;
}
float value_scale = 0.01;
for (int node = 0; node < num_nodes; ++node) {
for (int local_rank = 0; local_rank < num_ranks_per_node; ++local_rank) {
auto* device = GetDevice(num_ranks_per_node, node, local_rank);
auto* stream = device->tensorflow_accelerator_device_info()->stream;
Tensor in_cpu(data_type_, shape);
test::FillFn<Scalar>(&in_cpu, [&](int index) {
return static_cast<Scalar>((index + 1) * value_scale + value_offset);
});
for (int j = 0; j < shape.num_elements(); ++j) {
auto in_val = in_cpu.flat<Scalar>()(j);
auto out_expr = test_case->expected.template flat<Scalar>();
if (reduction_op == ncclProd) {
out_expr(j) = out_expr(j) * in_val;
} else if (reduction_op == ncclSum) {
out_expr(j) = out_expr(j) + in_val;
} else if (reduction_op == ncclMax) {
if (in_val > out_expr(j)) {
out_expr(j) = in_val;
}
} else if (reduction_op == ncclMin) {
if (in_val < out_expr(j)) {
out_expr(j) = in_val;
}
}
}
value_scale *= 10;
test_case->ins.emplace_back(GpuAllocator(device), data_type_, shape);
test_case->outs.emplace_back(GpuAllocator(device), data_type_, shape);
const Tensor& in_gpu = test_case->ins.back();
auto in_gpu_mem = AsDeviceMemory(in_gpu.flat<Scalar>().data());
TF_CHECK_OK(stream->Memcpy(&in_gpu_mem, in_cpu.flat<Scalar>().data(),
in_cpu.TotalBytes()));
}
}
return test_case;
}
TestCase* MakeGatherTestCase(int num_nodes, int num_ranks_per_node,
TensorShape in_shape, TensorShape out_shape) {
TestCase* test_case = new TestCase(num_nodes, num_ranks_per_node);
test_case->expected = Tensor(data_type_, out_shape);
test::FillFn<Scalar>(&test_case->expected,
[](int) { return static_cast<Scalar>(0); });
float value_scale = 0.01;
for (int node = 0; node < num_nodes; ++node) {
for (int i = 0; i < num_ranks_per_node; ++i) {
auto* device = GetDevice(num_ranks_per_node, node, i);
auto* stream = device->tensorflow_accelerator_device_info()->stream;
Tensor in_cpu(data_type_, in_shape);
test::FillFn<Scalar>(&in_cpu, [&](int index) {
return static_cast<Scalar>((index + 1) * value_scale);
});
int32_t gather_idx =
(node * num_ranks_per_node + i) * in_shape.num_elements();
for (int j = 0; j < in_shape.num_elements(); ++j) {
auto in_val = in_cpu.flat<Scalar>()(j);
auto out_expr = test_case->expected.template flat<Scalar>();
out_expr(gather_idx + j) = in_val;
}
value_scale *= 10;
test_case->ins.emplace_back(GpuAllocator(device), data_type_, in_shape);
test_case->outs.emplace_back(GpuAllocator(device), data_type_,
out_shape);
const Tensor& in_gpu = test_case->ins.back();
auto in_gpu_mem = AsDeviceMemory(in_gpu.flat<Scalar>().data());
TF_CHECK_OK(stream->Memcpy(&in_gpu_mem, in_cpu.flat<Scalar>().data(),
in_cpu.TotalBytes()));
}
}
return test_case;
}
TestCase* MakeBroadcastTestCase(int num_nodes, int num_ranks_per_node,
TensorShape shape, int src_node, int src_rank,
bool in_place) {
TestCase* test_case = new TestCase(num_nodes, num_ranks_per_node);
test_case->expected = Tensor(data_type_, shape);
test::FillFn<Scalar>(&test_case->expected,
[](int) { return static_cast<Scalar>(1); });
for (int node = 0; node < num_nodes; ++node) {
for (int local_rank = 0; local_rank < num_ranks_per_node; ++local_rank) {
auto* device = GetDevice(num_ranks_per_node, node, local_rank);
if (node == src_node && local_rank == src_rank) {
test_case->ins.emplace_back(GpuAllocator(device), data_type_, shape);
if (in_place) {
test_case->outs.emplace_back(test_case->ins.back());
} else {
test_case->outs.emplace_back(GpuAllocator(device), data_type_,
shape);
}
Tensor in_cpu(data_type_, shape);
test::FillFn<Scalar>(&in_cpu,
[](int) { return static_cast<Scalar>(1); });
const Tensor& in_gpu = test_case->ins.back();
auto in_gpu_mem = AsDeviceMemory(in_gpu.flat<Scalar>().data());
auto* stream = device->tensorflow_accelerator_device_info()->stream;
TF_CHECK_OK(stream->Memcpy(&in_gpu_mem, in_cpu.flat<Scalar>().data(),
in_cpu.TotalBytes()));
} else {
test_case->ins.emplace_back(Tensor());
test_case->outs.emplace_back(GpuAllocator(device), data_type_, shape);
}
}
}
return test_case;
}
void WaitForTestCompletion(TestCase* test_case) {
mutex_lock l(test_case->mu);
while (test_case->num_completed != test_case->outs.size()) {
test_case->done_cv.wait(l);
}
}
void VerifyResults(TestCase* test_case) {
WaitForTestCompletion(test_case);
TF_ASSERT_OK(test_case->final_status);
for (int node = 0; node < test_case->num_nodes; ++node) {
for (int local_rank = 0; local_rank < test_case->num_ranks_per_node;
++local_rank) {
auto* device =
GetDevice(test_case->num_ranks_per_node, node, local_rank);
auto* stream = device->tensorflow_accelerator_device_info()->stream;
const int global_rank =
GlobalRank(test_case->num_ranks_per_node, node, local_rank);
const Tensor& out_gpu = test_case->outs[global_rank];
Tensor out_cpu(data_type_, out_gpu.shape());
auto out_gpu_mem = AsDeviceMemory(out_gpu.flat<Scalar>().data());
TF_CHECK_OK(stream->Memcpy(out_cpu.flat<Scalar>().data(), out_gpu_mem,
out_cpu.TotalBytes()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
VLOG(1) << "Verifying rank " << global_rank << " expected shape "
<< test_case->expected.shape() << " out shape "
<< out_cpu.shape();
test::ExpectClose(test_case->expected, out_cpu);
}
}
}
void VerifyError(TestCase* test_case) {
WaitForTestCompletion(test_case);
LOG(INFO) << test_case->final_status;
EXPECT_EQ(test_case->final_status.code(), error::INTERNAL);
}
NcclManager::DoneCallback CreateDoneCallback(TestCase* test_case) {
return [this, test_case](Status s) {
mutex_lock l(test_case->mu);
test_case->final_status.Update(s);
if (++test_case->num_completed == test_case->outs.size()) {
test_case->done_cv.notify_one();
}
};
}
struct NodeState {
NcclManager nccl_manager;
std::atomic<int> launched{0};
};
void RunMultiNodeAllReduceTest(const int num_nodes,
const int num_ranks_per_node) {
std::vector<NodeState> node_states(num_nodes);
RunMultiNodeAllReduceTest(node_states, num_ranks_per_node);
}
void RunMultiNodeAllReduceTest(std::vector<NodeState>& node_states,
const int num_ranks_per_node) {
const int num_nodes = node_states.size();
const int num_global_ranks = num_nodes * num_ranks_per_node;
const string collective_key = "allreduce";
const string communicator_key =
node_states[0].nccl_manager.GenerateCommunicatorKey();
for (int op = 0; op < 4; ++op) {
ncclRedOp_t reduction_op = static_cast<ncclRedOp_t>(op);
std::unique_ptr<TestCase> test_case(
this->MakeReductionTestCase(num_nodes, num_ranks_per_node,
reduction_op, TensorShape({2, 3}), 0.0f));
for (int node = 0; node < num_nodes; ++node) {
auto node_fn = [this, node, num_ranks_per_node, num_global_ranks,
&node_states, &communicator_key, &collective_key,
reduction_op, &test_case] {
for (int local_rank = 0; local_rank < num_ranks_per_node;
++local_rank) {
auto* device = GetDevice(num_ranks_per_node, node, local_rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
const int global_rank =
GlobalRank(num_ranks_per_node, node, local_rank);
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[global_rank],
&test_case->outs[global_rank], global_rank,
this->CreateDoneCallback(test_case.get()));
node_states[node].nccl_manager.AddToAllReduce(
std::move(participant),
{collective_key, num_ranks_per_node, num_global_ranks,
communicator_key, -1},
reduction_op);
VLOG(1) << "AddToAllReduce node " << node << " global_rank "
<< global_rank;
}
node_states[node].nccl_manager.SignalMultiNodeReady(collective_key);
};
this->work_queue_->Schedule(node_fn);
}
VLOG(2) << "Verifying results";
this->VerifyResults(test_case.get());
}
}
void RunMultiNodeBroadcastTest(const int num_nodes,
const int num_ranks_per_node,
const int src_node, const int src_local_rank,
const bool in_place) {
const int num_global_ranks = num_nodes * num_ranks_per_node;
const int src_global_rank = src_node * num_ranks_per_node + src_local_rank;
const string collective_key = "broadcast";
std::vector<NodeState> node_states(num_nodes);
const string communicator_key =
node_states[0].nccl_manager.GenerateCommunicatorKey();
std::unique_ptr<TestCase> test_case(this->MakeBroadcastTestCase(
num_nodes, num_ranks_per_node, TensorShape({5, 6}), src_node,
src_local_rank, in_place));
for (int node = 0; node < num_nodes; ++node) {
for (int local_rank = 0; local_rank < num_ranks_per_node; ++local_rank) {
auto rank_fn = [this, node, num_ranks_per_node, num_global_ranks,
src_global_rank, local_rank, &node_states,
&collective_key, &communicator_key, &test_case]() {
auto* device = GetDevice(num_ranks_per_node, node, local_rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
const int global_rank =
GlobalRank(num_ranks_per_node, node, local_rank);
auto* input = global_rank == src_global_rank
? &test_case->ins[global_rank]
: nullptr;
auto* output = test_case->outs[global_rank].NumElements() == 0
? nullptr
: &test_case->outs[global_rank];
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, input, output, global_rank,
this->CreateDoneCallback(test_case.get()));
if (global_rank == src_global_rank) {
node_states[node].nccl_manager.AddBroadcastSend(
std::move(participant),
{collective_key, num_ranks_per_node, num_global_ranks,
communicator_key, src_global_rank});
} else {
node_states[node].nccl_manager.AddBroadcastRecv(
std::move(participant),
{collective_key, num_ranks_per_node, num_global_ranks,
communicator_key, src_global_rank});
}
if (++node_states[node].launched == num_ranks_per_node) {
node_states[node].nccl_manager.SignalMultiNodeReady(collective_key);
}
};
this->work_queue_->Schedule(std::move(rank_fn));
}
}
VLOG(2) << "Verifying results";
this->VerifyResults(test_case.get());
}
static int GlobalRank(int num_ranks_per_node, int node, int local_rank) {
return node * num_ranks_per_node + local_rank;
}
static BaseGPUDevice* GetDevice(int num_ranks_per_node, int node,
int local_rank) {
const int device_idx = GlobalRank(num_ranks_per_node, node, local_rank);
CHECK_LT(device_idx, devices_->size());
return (*devices_)[device_idx].get();
}
static UnboundedWorkQueue* work_queue_;
private:
static Allocator* GpuAllocator(BaseGPUDevice* device) {
return device->GetAllocator(AllocatorAttributes());
}
static se::DeviceMemory<Scalar> AsDeviceMemory(const Scalar* cuda_memory) {
se::DeviceMemoryBase wrapped(const_cast<Scalar*>(cuda_memory));
se::DeviceMemory<Scalar> typed(wrapped);
return typed;
}
static std::vector<std::unique_ptr<BaseGPUDevice>>* devices_;
static const DataType data_type_;
static const Scalar max_;
};
template <typename Scalar>
std::vector<std::unique_ptr<BaseGPUDevice>>* NcclManagerTest<Scalar>::devices_ =
nullptr;
template <typename Scalar>
const DataType NcclManagerTest<Scalar>::data_type_ =
DataTypeToEnum<Scalar>::value;
template <typename Scalar>
const Scalar NcclManagerTest<Scalar>::max_ =
Eigen::NumTraits<Scalar>::highest();
template <typename Scalar>
UnboundedWorkQueue* NcclManagerTest<Scalar>::work_queue_ = nullptr;
using TypeList = ::testing::Types<float, double>;
TYPED_TEST_SUITE(NcclManagerTest, TypeList);
TYPED_TEST(NcclManagerTest, BasicSumReduction) {
const int num_ranks = this->NumGPUs();
for (int op = 0; op < 4; ++op) {
ncclRedOp_t reduction_op = static_cast<ncclRedOp_t>(op);
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(1, num_ranks, reduction_op,
TensorShape({2, 3}), 0.0f));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
VLOG(2) << "rank " << rank << " device " << device->name();
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddToAllReduce(
          std::move(participant),
          {"allreduce", /*num_local_devices=*/num_ranks,
           /*num_global_devices=*/num_ranks, /*communicator_key=*/"",
           /*source_rank=*/-1},
reduction_op);
}
LOG(INFO) << "Verifying results";
this->VerifyResults(test_case.get());
}
}
TYPED_TEST(NcclManagerTest, MultipleCallers) {
const int num_ranks = this->NumGPUs();
const int num_collectives_per_iteration = 10;
const int time_limit_micros = 1 * 1000 * 1000;
int64_t start = Env::Default()->NowMicros();
srand(Env::Default()->NowMicros());
for (;;) {
std::vector<std::pair<int, int>> case_and_rank;
std::vector<std::unique_ptr<typename TestFixture::TestCase>> test_cases;
for (int i = 0; i < num_collectives_per_iteration; ++i) {
test_cases.emplace_back(this->MakeReductionTestCase(
1, num_ranks, ncclSum,
TensorShape({100, i % 5 + 1, i % 3 + 1}), 1.1f * i));
for (int j = 0; j < num_ranks; ++j) {
case_and_rank.emplace_back(i, j);
}
}
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* stream = device->tensorflow_accelerator_device_info()->stream;
TF_ASSERT_OK(stream->BlockHostUntilDone());
}
std::shuffle(case_and_rank.begin(), case_and_rank.end(),
std::mt19937(std::random_device()()));
mutex mu;
const int to_schedule = case_and_rank.size();
for (int i = 0; i < to_schedule; ++i) {
auto fn = [&]() {
int rank;
int test_num;
{
mutex_lock l(mu);
test_num = case_and_rank.back().first;
rank = case_and_rank.back().second;
case_and_rank.pop_back();
}
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
typename TestFixture::TestCase* test_case = test_cases[test_num].get();
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case));
NcclManager::instance()->AddToAllReduce(
std::move(participant),
            {strings::StrCat("allreduce", test_num),
             /*num_local_devices=*/num_ranks,
             /*num_global_devices=*/num_ranks,
             /*communicator_key=*/"", /*source_rank=*/-1},
ncclSum);
};
this->work_queue_->Schedule(fn);
}
VLOG(2) << "Verifying results for " << num_collectives_per_iteration
<< " collectives";
for (int i = 0; i < test_cases.size(); ++i) {
this->VerifyResults(test_cases[i].get());
}
int64_t delta = Env::Default()->NowMicros() - start;
if (delta > time_limit_micros) {
LOG(INFO) << "Ran for " << delta << " microsecs, now quitting";
break;
}
}
}
TYPED_TEST(NcclManagerTest, BasicAllGather) {
const int num_ranks = this->NumGPUs();
for (int i = 0; i < num_ranks; ++i) {
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeGatherTestCase(1, num_ranks,
TensorShape({2, 3}),
TensorShape({2 * num_ranks, 3})));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
VLOG(2) << "rank " << rank << " device " << device->name();
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], rank,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddToAllGather(
          std::move(participant),
          {"allgather", /*num_local_devices=*/num_ranks,
           /*num_global_devices=*/num_ranks, /*communicator_key=*/"",
           /*source_rank=*/-1});
}
LOG(INFO) << "Verifying results";
this->VerifyResults(test_case.get());
}
}
TYPED_TEST(NcclManagerTest, BasicBroadcast) {
  this->RunMultiNodeBroadcastTest(/*num_nodes=*/1,
                                  /*num_ranks_per_node=*/this->NumGPUs(),
                                  /*src_node=*/0, /*src_local_rank=*/0,
                                  /*in_place=*/false);
}
TYPED_TEST(NcclManagerTest, InPlaceBroadcast) {
  this->RunMultiNodeBroadcastTest(/*num_nodes=*/1,
                                  /*num_ranks_per_node=*/this->NumGPUs(),
                                  /*src_node=*/0, /*src_local_rank=*/0,
                                  /*in_place=*/true);
}
TYPED_TEST(NcclManagerTest, BroadcastWithDifferentRanks) {
for (int num_ranks = 1; num_ranks <= this->NumGPUs(); ++num_ranks) {
const int src_rank = static_cast<int>(random::New64() % num_ranks);
for (int in_place_idx = 0; in_place_idx <= 1; ++in_place_idx) {
const bool in_place = in_place_idx == 0;
      this->RunMultiNodeBroadcastTest(/*num_nodes=*/1, num_ranks,
                                      /*src_node=*/0, src_rank, in_place);
}
}
}
TEST(NcclManagerTest, CommunicatorKey) {
const string communicator_key =
NcclManager::instance()->GenerateCommunicatorKey();
EXPECT_EQ(communicator_key.size(), NCCL_UNIQUE_ID_BYTES);
}
#if !TENSORFLOW_USE_ROCM
TYPED_TEST(NcclManagerTest, MultiNode) {
int num_nodes;
int num_ranks_per_node;
this->PopulateMultiNodeParams(&num_nodes, &num_ranks_per_node);
VLOG(1) << "Calling RunMultiNodeAllReduceTest with num_nodes=" << num_nodes
<< " and num_ranks_per_node=" << num_ranks_per_node;
this->RunMultiNodeAllReduceTest(num_nodes, num_ranks_per_node);
}
#endif
TYPED_TEST(NcclManagerTest, MultiNodeSingle) {
  this->RunMultiNodeAllReduceTest(/*num_nodes=*/1,
                                  /*num_ranks_per_node=*/this->NumGPUs());
}
#if !TENSORFLOW_USE_ROCM
TYPED_TEST(NcclManagerTest, MultiNodeBroadcast) {
int num_nodes;
int num_ranks_per_node;
this->PopulateMultiNodeParams(&num_nodes, &num_ranks_per_node);
VLOG(1) << "Calling RunMultiNodeBroadcastTest with num_nodes=" << num_nodes
<< " and num_ranks_per_node=" << num_ranks_per_node;
  this->RunMultiNodeBroadcastTest(num_nodes, num_ranks_per_node,
                                  /*src_node=*/0, /*src_local_rank=*/0,
                                  /*in_place=*/true);
}
#endif
TYPED_TEST(NcclManagerTest, ConsistentCollectiveType) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(1, num_ranks, ncclSum,
TensorShape({2, 3}), 0.0f));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case.get()));
if (rank == 0) {
      NcclManager::instance()->AddToAllReduce(std::move(participant),
                                              {"bad_coll_type",
                                               /*num_local_devices=*/num_ranks,
                                               /*num_global_devices=*/num_ranks,
                                               /*communicator_key=*/"",
                                               /*source_rank=*/-1},
ncclSum);
} else {
NcclManager::instance()->AddBroadcastSend(
          std::move(participant),
          {"bad_coll_type",
           /*num_local_devices=*/num_ranks,
           /*num_global_devices=*/num_ranks,
           /*communicator_key=*/"", /*source_rank=*/-1});
}
}
this->VerifyError(test_case.get());
}
TYPED_TEST(NcclManagerTest, ConsistentCommunicatorKey) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(1, num_ranks, ncclSum,
TensorShape({2, 3}), 0.0f));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddToAllReduce(
        std::move(participant),
        {"bad_coll_type",
         /*num_local_devices=*/num_ranks,
         /*num_global_devices=*/num_ranks,
         /*communicator_key=*/
         rank == 0 ? "" : NcclManager::instance()->GenerateCommunicatorKey(),
         /*source_rank=*/-1},
ncclSum);
}
this->VerifyError(test_case.get());
}
TYPED_TEST(NcclManagerTest, ConsistentNumberOfDevices) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(1, num_ranks, ncclSum,
TensorShape({2, 3}), 0.0f));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
int num_devices = rank == 0 ? num_ranks : num_ranks + 1;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddToAllReduce(std::move(participant),
{"bad_coll_type",
num_devices,
num_devices,
"", |
1,444 | cpp | tensorflow/tensorflow | debug_io_utils | tensorflow/core/debug/debug_io_utils.cc | tensorflow/core/debug/debug_io_utils_test.cc | #ifndef TENSORFLOW_CORE_DEBUG_DEBUG_IO_UTILS_H_
#define TENSORFLOW_CORE_DEBUG_DEBUG_IO_UTILS_H_
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/debug/debug_node_key.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
Status ReadEventFromFile(const string& dump_file_path, Event* event);
struct DebugWatchAndURLSpec {
DebugWatchAndURLSpec(const string& watch_key, const string& url,
const bool gated_grpc)
: watch_key(watch_key), url(url), gated_grpc(gated_grpc) {}
const string watch_key;
const string url;
const bool gated_grpc;
};
class DebugIO {
public:
static const char* const kDebuggerPluginName;
static const char* const kCoreMetadataTag;
static const char* const kGraphTag;
static const char* const kHashTag;
static const char* const kFileURLScheme;
static const char* const kGrpcURLScheme;
static const char* const kMemoryURLScheme;
static Status PublishDebugMetadata(
const int64_t global_step, const int64_t session_run_index,
const int64_t executor_step_index, const std::vector<string>& input_names,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
const std::unordered_set<string>& debug_urls);
static Status PublishDebugTensor(const DebugNodeKey& debug_node_key,
const Tensor& tensor,
const uint64 wall_time_us,
const absl::Span<const string> debug_urls,
bool gated_grpc, int64_t step_id = -1);
static Status PublishDebugTensor(const DebugNodeKey& debug_node_key,
const Tensor& tensor,
const uint64 wall_time_us,
const absl::Span<const string> debug_urls);
static Status PublishGraph(const Graph& graph, const string& device_name,
const std::unordered_set<string>& debug_urls);
static bool IsCopyNodeGateOpen(
const std::vector<DebugWatchAndURLSpec>& specs);
static bool IsDebugNodeGateOpen(const string& watch_key,
const std::vector<string>& debug_urls);
static bool IsDebugURLGateOpen(const string& watch_key,
const string& debug_url);
static Status CloseDebugURL(const string& debug_url);
};
class DebugFileIO {
public:
static Status DumpTensorToDir(const DebugNodeKey& debug_node_key,
const Tensor& tensor, const uint64 wall_time_us,
const string& dump_root_dir,
string* dump_file_path);
static Status DumpTensorToDirForNodeDumping(
const DebugNodeKey& debug_node_key, const Tensor& tensor,
uint64 wall_time_us, const string& dump_root_dir, string* dump_file_path,
int64_t step_id);
static string GetDumpFilePath(const string& dump_root_dir,
const DebugNodeKey& debug_node_key,
const uint64 wall_time_us);
static string GetDumpFilePathForNodeDumping(
const string& dump_root_dir, const DebugNodeKey& debug_node_key,
uint64 wall_time_us, int64_t step_id);
static Status DumpEventProtoToFile(const Event& event_proto,
const string& dir_name,
const string& file_name);
static bool requestDiskByteUsage(uint64 bytes);
static void resetDiskByteUsage();
static uint64 global_disk_bytes_limit_;
private:
static Status DumpTensorToEventFile(const DebugNodeKey& debug_node_key,
const Tensor& tensor,
const uint64 wall_time_us,
const string& file_path);
static Status RecursiveCreateDir(Env* env, const string& dir);
static uint64 disk_bytes_used_;
static mutex bytes_mu_;
static const uint64 kDefaultGlobalDiskBytesLimit;
friend class DiskUsageLimitTest;
};
}
namespace std {
template <>
struct hash<::tensorflow::DebugNodeKey> {
size_t operator()(const ::tensorflow::DebugNodeKey& k) const {
return ::tensorflow::Hash64(
::tensorflow::strings::StrCat(k.device_name, ":", k.node_name, ":",
k.output_slot, ":", k.debug_op, ":"));
}
};
}
#ifndef PLATFORM_WINDOWS
#include "grpcpp/channel.h"
#include "tensorflow/core/debug/debug_service.grpc.pb.h"
namespace tensorflow {
class DebugGrpcChannel {
public:
explicit DebugGrpcChannel(const string& server_stream_addr);
virtual ~DebugGrpcChannel() {}
Status Connect(const int64_t timeout_micros);
bool WriteEvent(const Event& event);
bool ReadEventReply(EventReply* event_reply);
void ReceiveAndProcessEventReplies(size_t max_replies);
Status ReceiveServerRepliesAndClose();
private:
string server_stream_addr_;
string url_;
::grpc::ClientContext ctx_;
std::shared_ptr<::grpc::Channel> channel_;
std::unique_ptr<grpc::EventListener::Stub> stub_;
std::unique_ptr<::grpc::ClientReaderWriterInterface<Event, EventReply>>
reader_writer_;
mutex mu_;
};
class DebugGrpcIO {
public:
static const size_t kGrpcMessageSizeLimitBytes;
static const size_t kGrpcMaxVarintLengthSize;
static Status SendTensorThroughGrpcStream(const DebugNodeKey& debug_node_key,
const Tensor& tensor,
const uint64 wall_time_us,
const string& grpc_stream_url,
const bool gated);
static Status SendEventProtoThroughGrpcStream(
const Event& event_proto, const string& grpc_stream_url,
const bool receive_reply = false);
static Status ReceiveEventReplyProtoThroughGrpcStream(
EventReply* event_reply, const string& grpc_stream_url);
static bool IsReadGateOpen(const string& grpc_debug_url,
const string& watch_key);
static bool IsWriteGateOpen(const string& grpc_debug_url,
const string& watch_key);
static Status CloseGrpcStream(const string& grpc_stream_url);
static void SetDebugNodeKeyGrpcState(
const string& grpc_debug_url, const string& watch_key,
const EventReply::DebugOpStateChange::State new_state);
private:
using DebugNodeName2State =
std::unordered_map<string, EventReply::DebugOpStateChange::State>;
static std::unordered_map<string, std::unique_ptr<DebugGrpcChannel>>*
GetStreamChannels();
static Status GetOrCreateDebugGrpcChannel(
const string& grpc_stream_url, DebugGrpcChannel** debug_grpc_channel);
static std::unordered_map<string, DebugNodeName2State>*
GetEnabledDebugOpStates();
static DebugNodeName2State* GetEnabledDebugOpStatesAtUrl(
const string& grpc_debug_url);
static void ClearEnabledWatchKeys();
static mutex streams_mu_;
static int64_t channel_connection_timeout_micros_;
friend class GrpcDebugTest;
friend class DebugNumericSummaryOpTest;
};
}
#endif
#endif
#include "tensorflow/core/debug/debug_io_utils.h"
#include <stddef.h>
#include <string.h>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <utility>
#include <vector>
#ifndef PLATFORM_WINDOWS
#include "grpcpp/create_channel.h"
#else
#endif
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/debug/debug_callback_registry.h"
#include "tensorflow/core/debug/debugger_event_metadata.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/bits.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/util/event.pb.h"
#define GRPC_OSS_WINDOWS_UNIMPLEMENTED_ERROR \
return errors::Unimplemented( \
kGrpcURLScheme, " debug URL scheme is not implemented on Windows yet.")
namespace tensorflow {
namespace {
constexpr absl::string_view kDumpSubDirName = "node-io-dump";
Event PrepareChunkEventProto(const DebugNodeKey& debug_node_key,
const uint64 wall_time_us, const size_t num_chunks,
const size_t chunk_index,
const DataType& tensor_dtype,
const TensorShapeProto& tensor_shape) {
Event event;
event.set_wall_time(static_cast<double>(wall_time_us));
Summary::Value* value = event.mutable_summary()->add_value();
value->set_node_name(debug_node_key.debug_node_name);
value->set_tag(debug_node_key.node_name);
third_party::tensorflow::core::debug::DebuggerEventMetadata metadata;
metadata.set_device(debug_node_key.device_name);
metadata.set_output_slot(debug_node_key.output_slot);
metadata.set_num_chunks(num_chunks);
metadata.set_chunk_index(chunk_index);
string json_output;
tensorflow::protobuf::util::JsonPrintOptions json_options;
json_options.always_print_primitive_fields = true;
auto status = tensorflow::protobuf::util::MessageToJsonString(
metadata, &json_output, json_options);
if (status.ok()) {
SummaryMetadata::PluginData* plugin_data =
value->mutable_metadata()->mutable_plugin_data();
plugin_data->set_plugin_name(DebugIO::kDebuggerPluginName);
plugin_data->set_content(json_output);
} else {
LOG(WARNING) << "Failed to convert DebuggerEventMetadata proto to JSON. "
<< "The debug_node_name is " << debug_node_key.debug_node_name
<< ".";
}
value->mutable_tensor()->set_dtype(tensor_dtype);
*value->mutable_tensor()->mutable_tensor_shape() = tensor_shape;
return event;
}
const size_t StringValMaxBytesInProto(const string& str) {
#if defined(PLATFORM_GOOGLE)
return str.size() + DebugGrpcIO::kGrpcMaxVarintLengthSize;
#else
return str.size();
#endif
}
Status WrapStringTensorAsEvents(const DebugNodeKey& debug_node_key,
const uint64 wall_time_us,
const size_t chunk_size_limit,
TensorProto* tensor_proto,
std::vector<Event>* events) {
const protobuf::RepeatedPtrField<string>& strs = tensor_proto->string_val();
const size_t num_strs = strs.size();
const size_t chunk_size_ub = chunk_size_limit > 0
? chunk_size_limit
: std::numeric_limits<size_t>::max();
std::vector<size_t> cutoffs;
size_t chunk_size = 0;
for (size_t i = 0; i < num_strs; ++i) {
if (StringValMaxBytesInProto(strs[i]) > chunk_size_ub) {
return errors::FailedPrecondition(
"string value at index ", i, " from debug node ",
debug_node_key.debug_node_name,
" does not fit gRPC message size limit (", chunk_size_ub, ")");
}
if (chunk_size + StringValMaxBytesInProto(strs[i]) > chunk_size_ub) {
cutoffs.push_back(i);
chunk_size = 0;
}
chunk_size += StringValMaxBytesInProto(strs[i]);
}
cutoffs.push_back(num_strs);
const size_t num_chunks = cutoffs.size();
for (size_t i = 0; i < num_chunks; ++i) {
Event event = PrepareChunkEventProto(debug_node_key, wall_time_us,
num_chunks, i, tensor_proto->dtype(),
tensor_proto->tensor_shape());
Summary::Value* value = event.mutable_summary()->mutable_value(0);
if (cutoffs.size() == 1) {
value->mutable_tensor()->mutable_string_val()->Swap(
tensor_proto->mutable_string_val());
} else {
const size_t begin = (i == 0) ? 0 : cutoffs[i - 1];
const size_t end = cutoffs[i];
for (size_t j = begin; j < end; ++j) {
value->mutable_tensor()->add_string_val(strs[j]);
}
}
events->push_back(std::move(event));
}
return absl::OkStatus();
}
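// Illustrative sketch of the chunking in WrapStringTensorAsEvents above:
// cutoffs record the indices where accumulating one more string would exceed
// the chunk size limit, so each emitted event carries a contiguous run of
// string_val entries. For example, with a 10-byte limit and three 4-byte
// strings (on builds where StringValMaxBytesInProto(s) == s.size()), the
// cutoffs become {2, 3} and two events are produced: strings [0, 2) and [2, 3).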
Status WrapTensorAsEvents(const DebugNodeKey& debug_node_key,
const Tensor& tensor, const uint64 wall_time_us,
const size_t chunk_size_limit,
std::vector<Event>* events) {
TensorProto tensor_proto;
if (tensor.dtype() == DT_STRING) {
tensor.AsProtoField(&tensor_proto);
TF_RETURN_IF_ERROR(WrapStringTensorAsEvents(
debug_node_key, wall_time_us, chunk_size_limit, &tensor_proto, events));
} else {
tensor.AsProtoTensorContent(&tensor_proto);
const size_t total_length = tensor_proto.tensor_content().size();
const size_t chunk_size_ub =
chunk_size_limit > 0 ? chunk_size_limit : total_length;
const size_t num_chunks =
(total_length == 0)
? 1
: (total_length + chunk_size_ub - 1) / chunk_size_ub;
for (size_t i = 0; i < num_chunks; ++i) {
const size_t pos = i * chunk_size_ub;
const size_t len =
(i == num_chunks - 1) ? (total_length - pos) : chunk_size_ub;
Event event = PrepareChunkEventProto(debug_node_key, wall_time_us,
num_chunks, i, tensor_proto.dtype(),
tensor_proto.tensor_shape());
event.mutable_summary()
->mutable_value(0)
->mutable_tensor()
->set_tensor_content(tensor_proto.tensor_content().substr(pos, len));
events->push_back(std::move(event));
}
}
return absl::OkStatus();
}
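// For non-string tensors, WrapTensorAsEvents slices the serialized
// tensor_content directly. For example, 10 bytes of content with a 4-byte
// chunk limit gives num_chunks = ceil(10 / 4) = 3 events carrying 4, 4, and
// 2 bytes; an empty tensor still yields a single (empty) chunk.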
string AppendTimestampToFilePath(const string& in, const uint64 timestamp) {
string out = strings::StrCat(in, "_", timestamp);
uint64 i = 1;
while (Env::Default()->FileExists(out).ok()) {
out = strings::StrCat(in, "_", timestamp, "-", i);
++i;
}
return out;
}
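// Example of AppendTimestampToFilePath above: given "/tmp/run" and timestamp
// 1618033 it returns "/tmp/run_1618033", or "/tmp/run_1618033-1",
// "/tmp/run_1618033-2", ... if earlier candidates already exist, so dumps that
// share the same microsecond timestamp do not overwrite each other.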
#ifndef PLATFORM_WINDOWS
Status PublishEncodedGraphDefInChunks(const string& encoded_graph_def,
const string& device_name,
const int64_t wall_time,
const string& debug_url) {
const uint64 hash = ::tensorflow::Hash64(encoded_graph_def);
const size_t total_length = encoded_graph_def.size();
const size_t num_chunks =
static_cast<size_t>(std::ceil(static_cast<float>(total_length) /
DebugGrpcIO::kGrpcMessageSizeLimitBytes));
for (size_t i = 0; i < num_chunks; ++i) {
const size_t pos = i * DebugGrpcIO::kGrpcMessageSizeLimitBytes;
const size_t len = (i == num_chunks - 1)
? (total_length - pos)
: DebugGrpcIO::kGrpcMessageSizeLimitBytes;
Event event;
event.set_wall_time(static_cast<double>(wall_time));
event.set_graph_def(strings::StrCat(hash, ",", device_name, ",", wall_time,
"|", i, "|", num_chunks, "|",
encoded_graph_def.substr(pos, len)));
const Status s = DebugGrpcIO::SendEventProtoThroughGrpcStream(
event, debug_url, num_chunks - 1 == i);
if (!s.ok()) {
return errors::FailedPrecondition(
"Failed to send chunk ", i, " of ", num_chunks,
" of encoded GraphDef of size ", encoded_graph_def.size(), " bytes, ",
"due to: ", s.message());
}
}
return absl::OkStatus();
}
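// Each chunk sent by PublishEncodedGraphDefInChunks carries a header followed
// by the payload in its graph_def field:
//   "<hash>,<device_name>,<wall_time>|<chunk_index>|<num_chunks>|<bytes>"
// Only the final chunk is sent with receive_reply == true, i.e. only the last
// chunk asks the gRPC debug server for an EventReply.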
#endif
}
const char* const DebugIO::kDebuggerPluginName = "debugger";
const char* const DebugIO::kCoreMetadataTag = "core_metadata_";
const char* const DebugIO::kGraphTag = "graph_";
const char* const DebugIO::kHashTag = "hash";
Status ReadEventFromFile(const string& dump_file_path, Event* event) {
Env* env(Env::Default());
string content;
uint64 file_size = 0;
Status s = env->GetFileSize(dump_file_path, &file_size);
if (!s.ok()) {
return s;
}
content.resize(file_size);
std::unique_ptr<RandomAccessFile> file;
s = env->NewRandomAccessFile(dump_file_path, &file);
if (!s.ok()) {
return s;
}
StringPiece result;
s = file->Read(0, file_size, &result, &(content)[0]);
if (!s.ok()) {
return s;
}
event->ParseFromString(content);
return absl::OkStatus();
}
const char* const DebugIO::kFileURLScheme = "file://";
const char* const DebugIO::kGrpcURLScheme = "grpc://";
const char* const DebugIO::kMemoryURLScheme = "memcbk://";
Status DebugIO::PublishDebugMetadata(
const int64_t global_step, const int64_t session_run_index,
const int64_t executor_step_index, const std::vector<string>& input_names,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
const std::unordered_set<string>& debug_urls) {
std::ostringstream oss;
oss << "{";
oss << "\"global_step\":" << global_step << ",";
oss << "\"session_run_index\":" << session_run_index << ",";
oss << "\"executor_step_index\":" << executor_step_index << ",";
oss << "\"input_names\":[";
for (size_t i = 0; i < input_names.size(); ++i) {
oss << "\"" << input_names[i] << "\"";
if (i < input_names.size() - 1) {
oss << ",";
}
}
oss << "],";
oss << "\"output_names\":[";
for (size_t i = 0; i < output_names.size(); ++i) {
oss << "\"" << output_names[i] << "\"";
if (i < output_names.size() - 1) {
oss << ",";
}
}
oss << "],";
oss << "\"target_nodes\":[";
for (size_t i = 0; i < target_nodes.size(); ++i) {
oss << "\"" << target_nodes[i] << "\"";
if (i < target_nodes.size() - 1) {
oss << ",";
}
}
oss << "]";
oss << "}";
const string json_metadata = oss.str();
Event event;
event.set_wall_time(static_cast<double>(Env::Default()->NowMicros()));
LogMessage* log_message = event.mutable_log_message();
log_message->set_message(json_metadata);
Status status;
for (const string& url : debug_urls) {
if (absl::StartsWith(absl::AsciiStrToLower(url), kGrpcURLScheme)) {
#ifndef PLATFORM_WINDOWS
Event grpc_event;
const string address = url.substr(strlen(DebugIO::kFileURLScheme));
const string path = address.find('/') == string::npos
? ""
: address.substr(address.find('/'));
grpc_event.set_wall_time(event.wall_time());
LogMessage* log_message_grpc = grpc_event.mutable_log_message();
log_message_grpc->set_message(
strings::StrCat(json_metadata.substr(0, json_metadata.size() - 1),
",\"grpc_path\":\"", path, "\"}"));
status.Update(
DebugGrpcIO::SendEventProtoThroughGrpcStream(grpc_event, url, true));
#else
GRPC_OSS_WINDOWS_UNIMPLEMENTED_ERROR;
#endif
} else if (absl::StartsWith(absl::AsciiStrToLower(url), kFileURLSchem | #include <cstdlib>
#include <unordered_set>
#include "tensorflow/core/debug/debug_io_utils.h"
#include "tensorflow/core/debug/debug_callback_registry.h"
#include "tensorflow/core/debug/debug_node_key.h"
#include "tensorflow/core/debug/debugger_event_metadata.pb.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
namespace {
class DebugIOUtilsTest : public ::testing::Test {
public:
void Initialize() {
env_ = Env::Default();
tensor_a_.reset(new Tensor(DT_FLOAT, TensorShape({2, 2})));
tensor_a_->flat<float>()(0) = 5.0;
tensor_a_->flat<float>()(1) = 3.0;
tensor_a_->flat<float>()(2) = -1.0;
tensor_a_->flat<float>()(3) = 0.0;
tensor_b_.reset(new Tensor(DT_STRING, TensorShape{2}));
tensor_b_->flat<tstring>()(0) = "corge";
tensor_b_->flat<tstring>()(1) = "garply";
}
Env* env_;
std::unique_ptr<Tensor> tensor_a_;
std::unique_ptr<Tensor> tensor_b_;
};
TEST_F(DebugIOUtilsTest, ConstructDebugNodeKey) {
DebugNodeKey debug_node_key("/job:worker/replica:1/task:0/device:GPU:2",
"hidden_1/MatMul", 0, "DebugIdentity");
EXPECT_EQ("/job:worker/replica:1/task:0/device:GPU:2",
debug_node_key.device_name);
EXPECT_EQ("hidden_1/MatMul", debug_node_key.node_name);
EXPECT_EQ(0, debug_node_key.output_slot);
EXPECT_EQ("DebugIdentity", debug_node_key.debug_op);
EXPECT_EQ("hidden_1/MatMul:0:DebugIdentity", debug_node_key.debug_node_name);
EXPECT_EQ("_tfdbg_device_,job_worker,replica_1,task_0,device_GPU_2",
debug_node_key.device_path);
}
TEST_F(DebugIOUtilsTest, EqualityOfDebugNodeKeys) {
const DebugNodeKey debug_node_key_1("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/MatMul", 0, "DebugIdentity");
const DebugNodeKey debug_node_key_2("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/MatMul", 0, "DebugIdentity");
const DebugNodeKey debug_node_key_3("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/BiasAdd", 0, "DebugIdentity");
const DebugNodeKey debug_node_key_4("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/MatMul", 0,
"DebugNumericSummary");
EXPECT_EQ(debug_node_key_1, debug_node_key_2);
EXPECT_NE(debug_node_key_1, debug_node_key_3);
EXPECT_NE(debug_node_key_1, debug_node_key_4);
EXPECT_NE(debug_node_key_3, debug_node_key_4);
}
TEST_F(DebugIOUtilsTest, DebugNodeKeysIsHashable) {
const DebugNodeKey debug_node_key_1("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/MatMul", 0, "DebugIdentity");
const DebugNodeKey debug_node_key_2("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/MatMul", 0, "DebugIdentity");
const DebugNodeKey debug_node_key_3("/job:worker/replica:1/task:0/gpu:2",
"hidden_1/BiasAdd", 0, "DebugIdentity");
std::unordered_set<DebugNodeKey> keys;
keys.insert(debug_node_key_1);
ASSERT_EQ(1, keys.size());
keys.insert(debug_node_key_3);
ASSERT_EQ(2, keys.size());
keys.erase(debug_node_key_2);
ASSERT_EQ(1, keys.size());
}
TEST_F(DebugIOUtilsTest, DumpFloatTensorToFileSunnyDay) {
Initialize();
const string test_dir =
strings::StrCat(testing::TmpDir(), "/DumpFloatTensorToFileSunnyDay");
if (!env_->FileExists(test_dir).ok()) {
ASSERT_TRUE(env_->RecursivelyCreateDir(test_dir).ok());
}
const uint64 wall_time = env_->NowMicros();
const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
"foo/bar/qux/tensor_a", 0, "DebugIdentity");
string dump_file_path;
TF_ASSERT_OK(DebugFileIO::DumpTensorToDir(
kDebugNodeKey, *tensor_a_, wall_time, test_dir, &dump_file_path));
Event event;
TF_ASSERT_OK(ReadEventFromFile(dump_file_path, &event));
ASSERT_GE(wall_time, event.wall_time());
ASSERT_EQ(1, event.summary().value().size());
ASSERT_EQ(kDebugNodeKey.debug_node_name,
event.summary().value(0).node_name());
Tensor a_prime(DT_FLOAT);
ASSERT_TRUE(a_prime.FromProto(event.summary().value(0).tensor()));
ASSERT_EQ(tensor_a_->shape(), a_prime.shape());
for (int i = 0; i < a_prime.flat<float>().size(); ++i) {
ASSERT_EQ(tensor_a_->flat<float>()(i), a_prime.flat<float>()(i));
}
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
ASSERT_TRUE(
env_->DeleteRecursively(test_dir, &undeleted_files, &undeleted_dirs)
.ok());
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
TEST_F(DebugIOUtilsTest, DumpStringTensorToFileSunnyDay) {
Initialize();
const string test_dir =
strings::StrCat(testing::TmpDir(), "/DumpStringTensorToFileSunnyDay");
if (!env_->FileExists(test_dir).ok()) {
ASSERT_TRUE(env_->RecursivelyCreateDir(test_dir).ok());
}
const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
"quux/grault/tensor_b", 1, "DebugIdentity");
const uint64 wall_time = env_->NowMicros();
string dump_file_name;
Status s = DebugFileIO::DumpTensorToDir(kDebugNodeKey, *tensor_b_, wall_time,
test_dir, &dump_file_name);
ASSERT_TRUE(s.ok());
Event event;
TF_ASSERT_OK(ReadEventFromFile(dump_file_name, &event));
ASSERT_GE(wall_time, event.wall_time());
ASSERT_EQ(1, event.summary().value().size());
ASSERT_EQ(kDebugNodeKey.node_name, event.summary().value(0).tag());
ASSERT_EQ(kDebugNodeKey.debug_node_name,
event.summary().value(0).node_name());
third_party::tensorflow::core::debug::DebuggerEventMetadata metadata;
auto status = tensorflow::protobuf::util::JsonStringToMessage(
event.summary().value(0).metadata().plugin_data().content(), &metadata);
ASSERT_TRUE(status.ok());
ASSERT_EQ(kDebugNodeKey.device_name, metadata.device());
ASSERT_EQ(kDebugNodeKey.output_slot, metadata.output_slot());
Tensor b_prime(DT_STRING);
ASSERT_TRUE(b_prime.FromProto(event.summary().value(0).tensor()));
ASSERT_EQ(tensor_b_->shape(), b_prime.shape());
for (int i = 0; i < b_prime.flat<tstring>().size(); ++i) {
ASSERT_EQ(tensor_b_->flat<tstring>()(i), b_prime.flat<tstring>()(i));
}
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
ASSERT_TRUE(
env_->DeleteRecursively(test_dir, &undeleted_files, &undeleted_dirs)
.ok());
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
TEST_F(DebugIOUtilsTest, DumpTensorToFileCannotCreateDirectory) {
Initialize();
const string test_dir = strings::StrCat(
testing::TmpDir(), "/DumpTensorToFileCannotCreateDirectory");
if (!env_->FileExists(test_dir).ok()) {
ASSERT_TRUE(env_->RecursivelyCreateDir(test_dir).ok());
}
const string kDeviceName = "/job:localhost/replica:0/task:0/cpu:0";
const DebugNodeKey kDebugNodeKey(kDeviceName, "baz/tensor_a", 0,
"DebugIdentity");
const string txt_file_dir =
io::JoinPath(test_dir, DebugNodeKey::DeviceNameToDevicePath(kDeviceName));
const string txt_file_name = io::JoinPath(txt_file_dir, "baz");
if (!env_->FileExists(txt_file_dir).ok()) {
ASSERT_TRUE(env_->RecursivelyCreateDir(txt_file_dir).ok());
}
ASSERT_EQ(error::Code::NOT_FOUND, env_->FileExists(txt_file_name).code());
std::unique_ptr<WritableFile> file;
ASSERT_TRUE(env_->NewWritableFile(txt_file_name, &file).ok());
TF_EXPECT_OK(file->Append("text in baz"));
TF_EXPECT_OK(file->Flush());
TF_ASSERT_OK(file->Close());
ASSERT_TRUE(env_->FileExists(txt_file_name).ok());
ASSERT_FALSE(env_->IsDirectory(txt_file_name).ok());
const uint64 wall_time = env_->NowMicros();
string dump_file_name;
Status s = DebugFileIO::DumpTensorToDir(kDebugNodeKey, *tensor_a_, wall_time,
test_dir, &dump_file_name);
ASSERT_FALSE(s.ok());
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
ASSERT_TRUE(
env_->DeleteRecursively(test_dir, &undeleted_files, &undeleted_dirs)
.ok());
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
TEST_F(DebugIOUtilsTest, PublishTensorToMultipleFileURLs) {
Initialize();
const int kNumDumpRoots = 3;
const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
"foo/bar/qux/tensor_a", 0, "DebugIdentity");
const uint64 wall_time = env_->NowMicros();
std::vector<string> dump_roots;
std::vector<string> dump_file_paths;
std::vector<string> urls;
for (int i = 0; i < kNumDumpRoots; ++i) {
string dump_root = strings::StrCat(testing::TmpDir(),
"/PublicTensorToMultipleFileUrls_", i);
dump_roots.push_back(dump_root);
dump_file_paths.push_back(
DebugFileIO::GetDumpFilePath(dump_root, kDebugNodeKey, wall_time));
urls.push_back(strings::StrCat("file:
}
for (int i = 1; i < kNumDumpRoots; ++i) {
ASSERT_NE(dump_roots[0], dump_roots[i]);
}
Status s =
DebugIO::PublishDebugTensor(kDebugNodeKey, *tensor_a_, wall_time, urls);
ASSERT_TRUE(s.ok());
for (int i = 0; i < kNumDumpRoots; ++i) {
Event event;
TF_ASSERT_OK(ReadEventFromFile(dump_file_paths[i], &event));
ASSERT_GE(wall_time, event.wall_time());
ASSERT_EQ(1, event.summary().value().size());
ASSERT_EQ(kDebugNodeKey.node_name, event.summary().value(0).tag());
ASSERT_EQ(kDebugNodeKey.debug_node_name,
event.summary().value(0).node_name());
third_party::tensorflow::core::debug::DebuggerEventMetadata metadata;
auto status = tensorflow::protobuf::util::JsonStringToMessage(
event.summary().value(0).metadata().plugin_data().content(), &metadata);
ASSERT_TRUE(status.ok());
ASSERT_EQ(kDebugNodeKey.device_name, metadata.device());
ASSERT_EQ(kDebugNodeKey.output_slot, metadata.output_slot());
Tensor a_prime(DT_FLOAT);
ASSERT_TRUE(a_prime.FromProto(event.summary().value(0).tensor()));
ASSERT_EQ(tensor_a_->shape(), a_prime.shape());
for (int i = 0; i < a_prime.flat<float>().size(); ++i) {
ASSERT_EQ(tensor_a_->flat<float>()(i), a_prime.flat<float>()(i));
}
}
for (int i = 0; i < kNumDumpRoots; ++i) {
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
ASSERT_TRUE(env_->DeleteRecursively(dump_roots[i], &undeleted_files,
&undeleted_dirs)
.ok());
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
}
TEST_F(DebugIOUtilsTest, PublishTensorToMemoryCallback) {
Initialize();
const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
"foo/bar/qux/tensor_a", 0, "DebugIdentity");
const uint64 wall_time = env_->NowMicros();
bool called = false;
std::vector<string> urls = {"memcbk:
;
auto* callback_registry = DebugCallbackRegistry::singleton();
callback_registry->RegisterCallback(
"test_callback", [this, &kDebugNodeKey, &called](const DebugNodeKey& key,
const Tensor& tensor) {
called = true;
ASSERT_EQ(kDebugNodeKey.device_name, key.device_name);
ASSERT_EQ(kDebugNodeKey.node_name, key.node_name);
ASSERT_EQ(tensor_a_->shape(), tensor.shape());
for (int i = 0; i < tensor.flat<float>().size(); ++i) {
ASSERT_EQ(tensor_a_->flat<float>()(i), tensor.flat<float>()(i));
}
});
Status s =
DebugIO::PublishDebugTensor(kDebugNodeKey, *tensor_a_, wall_time, urls);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(called);
callback_registry->UnregisterCallback("test_callback");
}
TEST_F(DebugIOUtilsTest, PublishTensorConcurrentlyToPartiallyOverlappingPaths) {
Initialize();
const int kConcurrentPubs = 3;
const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
"tensor_a", 0, "DebugIdentity");
thread::ThreadPool* tp =
new thread::ThreadPool(Env::Default(), "test", kConcurrentPubs);
const uint64 wall_time = env_->NowMicros();
const string dump_root_base =
strings::StrCat(testing::TmpDir(),
"/PublishTensorConcurrentlyToPartiallyOverlappingPaths");
if (!env_->FileExists(dump_root_base).ok()) {
ASSERT_TRUE(env_->RecursivelyCreateDir(dump_root_base).ok());
}
mutex mu;
std::vector<string> dump_roots TF_GUARDED_BY(mu);
std::vector<string> dump_file_paths TF_GUARDED_BY(mu);
int dump_count TF_GUARDED_BY(mu) = 0;
int done_count TF_GUARDED_BY(mu) = 0;
Notification all_done;
auto fn = [this, &dump_count, &done_count, &mu, &dump_root_base, &dump_roots,
&dump_file_paths, &wall_time, &kDebugNodeKey, &kConcurrentPubs,
&all_done]() {
string dump_root;
string debug_url;
{
mutex_lock l(mu);
dump_root =
strings::StrCat(dump_root_base, "grumpy/", "dump_", dump_count++);
dump_roots.push_back(dump_root);
dump_file_paths.push_back(
DebugFileIO::GetDumpFilePath(dump_root, kDebugNodeKey, wall_time));
debug_url = strings::StrCat("file:
}
std::vector<string> urls;
urls.push_back(debug_url);
Status s =
DebugIO::PublishDebugTensor(kDebugNodeKey, *tensor_a_, wall_time, urls);
ASSERT_TRUE(s.ok());
{
mutex_lock l(mu);
done_count++;
if (done_count == kConcurrentPubs) {
all_done.Notify();
}
}
};
for (int i = 0; i < kConcurrentPubs; ++i) {
tp->Schedule(fn);
}
all_done.WaitForNotification();
delete tp;
{
mutex_lock l(mu);
for (int i = 1; i < kConcurrentPubs; ++i) {
ASSERT_NE(dump_roots[0], dump_roots[i]);
}
for (int i = 0; i < kConcurrentPubs; ++i) {
Event event;
TF_ASSERT_OK(ReadEventFromFile(dump_file_paths[i], &event));
ASSERT_GE(wall_time, event.wall_time());
ASSERT_EQ(1, event.summary().value().size());
ASSERT_EQ(kDebugNodeKey.node_name, event.summary().value(0).tag());
ASSERT_EQ(kDebugNodeKey.debug_node_name,
event.summary().value(0).node_name());
third_party::tensorflow::core::debug::DebuggerEventMetadata metadata;
auto status = tensorflow::protobuf::util::JsonStringToMessage(
event.summary().value(0).metadata().plugin_data().content(),
&metadata);
ASSERT_TRUE(status.ok());
ASSERT_EQ(kDebugNodeKey.device_name, metadata.device());
ASSERT_EQ(kDebugNodeKey.output_slot, metadata.output_slot());
Tensor a_prime(DT_FLOAT);
ASSERT_TRUE(a_prime.FromProto(event.summary().value(0).tensor()));
ASSERT_EQ(tensor_a_->shape(), a_prime.shape());
for (int i = 0; i < a_prime.flat<float>().size(); ++i) {
ASSERT_EQ(tensor_a_->flat<float>()(i), a_prime.flat<float>()(i));
}
}
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
auto delete_files = env_->DeleteRecursively(
dump_root_base, &undeleted_files, &undeleted_dirs);
ASSERT_TRUE(delete_files.ok()) << delete_files;
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
}
class DiskUsageLimitTest : public ::testing::Test {
public:
void Initialize() {
setenv("TFDBG_DISK_BYTES_LIMIT", "", 1);
DebugFileIO::resetDiskByteUsage();
DebugFileIO::global_disk_bytes_limit_ = 0;
}
};
TEST_F(DiskUsageLimitTest, RequestWithZeroByteIsOkay) {
Initialize();
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(0L));
}
TEST_F(DiskUsageLimitTest, ExceedingLimitAfterOneCall) {
Initialize();
ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(100L * 1024L * 1024L * 1024L));
}
TEST_F(DiskUsageLimitTest, ExceedingLimitAfterTwoCalls) {
Initialize();
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(1024L));
}
TEST_F(DiskUsageLimitTest, ResetDiskByteUsageWorks) {
Initialize();
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
DebugFileIO::resetDiskByteUsage();
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
}
TEST_F(DiskUsageLimitTest, CustomEnvVarIsObeyed) {
Initialize();
setenv("TFDBG_DISK_BYTES_LIMIT", "1024", 1);
ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(1024L));
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(1000L));
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(23L));
ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(1L));
DebugFileIO::resetDiskByteUsage();
ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(1023L));
}
}
} |
1,445 | cpp | tensorflow/tensorflow | debug_graph_utils | tensorflow/core/debug/debug_graph_utils.cc | tensorflow/core/debug/debug_graph_utils_test.cc | #ifndef TENSORFLOW_CORE_DEBUG_DEBUG_GRAPH_UTILS_H_
#define TENSORFLOW_CORE_DEBUG_DEBUG_GRAPH_UTILS_H_
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/debugger_state_interface.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/protobuf/debug.pb.h"
namespace tensorflow {
class DebugNodeInserter {
public:
static Status InsertNodes(
const protobuf::RepeatedPtrField<DebugTensorWatch>& watches, Graph* graph,
Device* device);
static void DeparallelizeWhileLoops(Graph* graph, Device* device);
static const string GetCopyNodeName(const string& node_name,
const int output_slot);
static const string GetDebugNodeName(const string& tensor_name,
const int debug_op_num,
const string& debug_op_name);
private:
static Status CreateCopyNode(
Graph* graph, const DeviceType device_type, const bool is_host_memory,
const string& src_node_name, const int src_output, const DataType src_dt,
const string& tensor_name, const std::vector<string>& debug_ops,
const std::vector<string>& debug_urls, Node** copy_node);
static Status ParseDebugOpName(
const string& debug_op_name, string* debug_op_name_proper,
std::unordered_map<string, string>* attributes);
static Status SetDebugNodeAttributes(
Node* debug_node, const std::unordered_map<string, string>& attributes);
static Status CreateDebugNode(Graph* graph, const Device& device,
const string& src_copy_node_name,
const DataType src_dt,
const string& tensor_name,
const std::vector<string>& debug_urls,
const int debug_op_num,
const string& debug_op_name, Node** debug_node);
friend class DebugGraphUtilsTest;
};
}
#endif
#include "tensorflow/core/debug/debug_graph_utils.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/memory_types.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/protobuf/debug.pb.h"
namespace tensorflow {
namespace {
Status ParseBoolString(const string& bool_str, bool* bool_val) {
const string lower_bool_str = absl::AsciiStrToLower(bool_str);
if (lower_bool_str == "false" || lower_bool_str == "f" ||
lower_bool_str == "0") {
*bool_val = false;
} else if (lower_bool_str == "true" || lower_bool_str == "t" ||
lower_bool_str == "1") {
*bool_val = true;
} else {
return absl::InvalidArgumentError(
absl::StrCat("Invalid string for bool value: ", bool_str));
}
return absl::OkStatus();
}
}
Status DebugNodeInserter::InsertNodes(
const protobuf::RepeatedPtrField<DebugTensorWatch>& watches, Graph* graph,
Device* device) {
if (watches.empty()) {
return absl::OkStatus();
}
std::vector<string> default_debug_ops;
std::vector<string> default_debug_urls;
std::unordered_map<string, std::vector<string>> tensor_watches;
std::unordered_map<string, std::vector<string>> tensor_watch_urls;
std::unordered_map<string, bool> tensor_tolerate_failures;
for (const DebugTensorWatch& watch : watches) {
if (watch.debug_ops().empty()) {
continue;
}
if (watch.debug_urls().empty()) {
continue;
}
if (watch.node_name() == "*") {
if (watch.output_slot() == -1) {
default_debug_ops.insert(default_debug_ops.end(),
watch.debug_ops().begin(),
watch.debug_ops().end());
default_debug_urls.insert(default_debug_urls.end(),
watch.debug_urls().begin(),
watch.debug_urls().end());
} else {
return Status(absl::StatusCode::kFailedPrecondition,
strings::StrCat(
"output_slot is expected to be -1 for wildcard ",
"node name (\"*\"), but got ", watch.output_slot()));
}
continue;
} else {
if (watch.output_slot() < 0) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("A negative output_slot in DebugTensorWatch is ",
"valid only for the wildcard node name (\"*\"), ",
"but got node name ", watch.node_name()));
}
}
string tensor_name =
strings::StrCat(watch.node_name(), ":", watch.output_slot());
std::vector<string> debug_ops;
for (const string& debug_op : watch.debug_ops()) {
debug_ops.push_back(debug_op);
}
tensor_watches[tensor_name] = debug_ops;
tensor_tolerate_failures[tensor_name] =
watch.tolerate_debug_op_creation_failures();
std::vector<string> urls;
for (const string& url : watch.debug_urls()) {
urls.push_back(url);
}
tensor_watch_urls[tensor_name] = urls;
}
if (tensor_watches.empty()) {
return absl::OkStatus();
}
DeviceType device_type = DeviceType{device->device_type()};
std::vector<const Edge*> edges_to_remove;
for (Node* src_node : graph->nodes()) {
std::unordered_map<int, std::vector<const Edge*>> output_slot_to_edges;
for (const Edge* edge : src_node->out_edges()) {
const int src_output = edge->src_output();
if (output_slot_to_edges.find(src_output) == output_slot_to_edges.end()) {
output_slot_to_edges[src_output] = {edge};
} else {
output_slot_to_edges[src_output].push_back(edge);
}
}
for (int src_output_slot = 0; src_output_slot < src_node->num_outputs();
++src_output_slot) {
const string tensor_name =
strings::StrCat(src_node->name(), ":", src_output_slot);
const bool explicit_tensor_match =
tensor_watches.find(tensor_name) != tensor_watches.end();
if (!explicit_tensor_match && default_debug_ops.empty()) {
continue;
}
const DataType src_dt = src_node->output_type(src_output_slot);
MemoryType memory_type;
TF_RETURN_IF_ERROR(MemoryTypeForOutput(device_type, graph, src_node,
src_output_slot, &memory_type));
const std::vector<string> debug_ops = explicit_tensor_match
? tensor_watches[tensor_name]
: default_debug_ops;
const std::vector<string> debug_urls =
explicit_tensor_match ? tensor_watch_urls[tensor_name]
: default_debug_urls;
Node* copy_node;
Status copy_s =
CreateCopyNode(graph, device_type, memory_type == HOST_MEMORY,
src_node->name(), src_output_slot, src_dt, tensor_name,
                         debug_ops, debug_urls, &copy_node);
if (!copy_s.ok()) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create Copy/CopyHost node for tensor ",
tensor_name, ", due to: ", copy_s.message()));
}
graph->AddEdge(src_node, src_output_slot, copy_node, 0);
std::vector<Node*> debug_nodes;
for (size_t i = 0; i < debug_ops.size(); ++i) {
const string& debug_op_name = debug_ops[i];
Node* debug_node;
Status debug_s = CreateDebugNode(graph, *device, copy_node->name(),
src_dt, tensor_name, debug_urls, i,
debug_op_name, &debug_node);
if (debug_s.ok()) {
graph->AddEdge(copy_node, 0, debug_node, 0);
debug_nodes.push_back(debug_node);
} else {
if (tensor_tolerate_failures[tensor_name]) {
LOG(INFO) << "Tolerating failure to create debug node: "
<< "tensor name = " << tensor_name << "; "
<< "debug op name = " << debug_op_name;
} else {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create debug node ", debug_op_name,
" for tensor ", tensor_name,
", due to: ", debug_s.message()));
}
}
}
const bool is_ref = IsRefType(src_node->output_type(src_output_slot));
for (const Edge* edge : output_slot_to_edges[src_output_slot]) {
if (!is_ref) {
edges_to_remove.push_back(edge);
graph->AddEdge(copy_node, 0, edge->dst(), edge->dst_input());
}
for (Node* debug_node : debug_nodes) {
if (!src_node->IsEnter() && !src_node->IsNextIteration()) {
graph->AddEdge(debug_node, Graph::kControlSlot, edge->dst(),
Graph::kControlSlot);
}
}
}
}
}
for (const Edge* edge : edges_to_remove) {
graph->RemoveEdge(edge);
}
return absl::OkStatus();
}
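// Rough shape of the rewrite performed by InsertNodes above, for a watched
// tensor n:0 with a single debug op "DebugIdentity":
//
//   n:0 --> __copy_n_0 (Copy or CopyHost) --> original consumers of n:0
//                 \--> __dbg_n:0_0_DebugIdentity --(control)--> consumers
//
// Ref-typed outputs keep their original edges (only non-ref consumers are
// rerouted through the copy node), and the debug nodes are ordered before the
// consumers via control edges, except when the source is Enter/NextIteration.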
void DebugNodeInserter::DeparallelizeWhileLoops(Graph* graph, Device* device) {
bool deparallelized_a_loop = false;
for (Node* node : graph->nodes()) {
if (node->IsEnter()) {
const AttrValue* parallel_iterations =
node->attrs().Find("parallel_iterations");
if (parallel_iterations && parallel_iterations->i() > 1) {
deparallelized_a_loop = true;
VLOG(1) << "Changing the parallel_iterations attribute of the "
<< "Enter/RefEnter node \"" << node->name() << "\" on device \""
<< device->name() << "\" from " << parallel_iterations->i()
<< " to 1.";
node->AddAttr<int64_t>("parallel_iterations", 1);
}
}
}
if (deparallelized_a_loop) {
LOG(INFO) << "For debugging, tfdbg has set the parallel_iterations "
<< "attribute of all scheduled Enter/RefEnter nodes to 1. (This "
<< "does not affect subsequent non-debug runs.)";
}
}
const string DebugNodeInserter::GetCopyNodeName(const string& node_name,
const int output_slot) {
return strings::StrCat("__copy_", node_name, "_", output_slot);
}
const string DebugNodeInserter::GetDebugNodeName(const string& tensor_name,
const int debug_op_num,
const string& debug_op_name) {
return strings::StrCat("__dbg_", tensor_name, "_", debug_op_num, "_",
debug_op_name);
}
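// Examples of the names generated by the helpers above:
//   GetCopyNodeName("hidden/MatMul", 0)
//       -> "__copy_hidden/MatMul_0"
//   GetDebugNodeName("hidden/MatMul:0", 0, "DebugIdentity")
//       -> "__dbg_hidden/MatMul:0_0_DebugIdentity"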
Status DebugNodeInserter::CreateCopyNode(
Graph* graph, const DeviceType device_type, const bool is_host_memory,
const string& src_node_name, const int src_output, const DataType src_dt,
const string& tensor_name, const std::vector<string>& debug_ops,
const std::vector<string>& debug_urls, Node** copy_node) {
const string kGatedGrpcAttributeKey = "gated_grpc";
NodeDef node_def;
const KernelDef* kdef;
const string copy_op_name = is_host_memory ? "CopyHost" : "Copy";
const string copy_node_name = GetCopyNodeName(src_node_name, src_output);
std::vector<string> debug_ops_spec;
for (const string& debug_op : debug_ops) {
for (const string& debug_url : debug_urls) {
string debug_op_name_proper;
std::unordered_map<string, string> custom_attributes;
TF_RETURN_IF_ERROR(ParseDebugOpName(debug_op, &debug_op_name_proper,
&custom_attributes));
bool gated_grpc_value = false;
if (custom_attributes.find(kGatedGrpcAttributeKey) !=
custom_attributes.end()) {
TF_RETURN_IF_ERROR(ParseBoolString(
custom_attributes[kGatedGrpcAttributeKey], &gated_grpc_value));
}
debug_ops_spec.push_back(strings::StrCat(debug_op_name_proper, ";",
debug_url, ";",
gated_grpc_value ? "1" : "0"));
}
}
auto builder = NodeDefBuilder(copy_node_name, copy_op_name)
.Input(src_node_name, src_output, src_dt)
.Attr("debug_ops_spec", debug_ops_spec);
if (!builder.Finalize(&node_def).ok()) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create node definition ", "for copy op ",
copy_node_name, " on watched tensor ", tensor_name));
}
Status s = FindKernelDef(device_type, node_def, &kdef, nullptr);
if (!s.ok()) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to find kernel definition ", "for copy op ",
copy_node_name, " on watched tensor ", tensor_name));
}
if (!NodeBuilder(builder).Finalize(graph, copy_node).ok()) {
return Status(absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create copy node ", copy_node_name,
" on watched tensor ", tensor_name));
}
return absl::OkStatus();
}
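// The "debug_ops_spec" attribute built in CreateCopyNode above holds one
// entry per (debug op, debug URL) pair, in the form
// "<debug_op_name>;<debug_url>;<0|1>", where the trailing flag reflects the
// op's gated_grpc attribute, e.g. "DebugIdentity;<debug_url>;1".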
Status DebugNodeInserter::ParseDebugOpName(
const string& debug_op_name, string* debug_op_name_proper,
std::unordered_map<string, string>* attributes) {
const size_t l_index = debug_op_name.find('(');
const size_t r_index = debug_op_name.find(')');
if (l_index == string::npos && r_index == string::npos) {
*debug_op_name_proper = debug_op_name;
} else {
if (l_index == string::npos || l_index == 0 ||
r_index != debug_op_name.size() - 1) {
return absl::InvalidArgumentError(
absl::StrCat("Malformed debug op name \"", debug_op_name, "\""));
}
*debug_op_name_proper = debug_op_name.substr(0, l_index);
string arguments = debug_op_name.substr(l_index + 1, r_index - l_index - 1);
std::vector<string> attribute_segs = str_util::Split(arguments, ";");
for (const string& attribute_seg : attribute_segs) {
StringPiece seg(attribute_seg);
str_util::RemoveWhitespaceContext(&seg);
if (seg.empty()) {
continue;
}
const size_t eq_index = seg.find('=');
if (eq_index == string::npos) {
return absl::InvalidArgumentError(absl::StrCat(
"Malformed attributes in debug op name \"", debug_op_name, "\""));
}
const string key(seg.substr(0, eq_index));
const string value(
seg.substr(eq_index + 1, attribute_seg.size() - eq_index - 1));
if (key.empty() || value.empty()) {
return absl::InvalidArgumentError(absl::StrCat(
"Malformed attributes in debug op name \"", debug_op_name, "\""));
}
if (attributes->find(key) == attributes->end()) {
(*attributes)[key] = value;
} else {
return absl::InvalidArgumentError(
absl::StrCat("Duplicate attribute name \"", key,
"\" found in the debug op: \"", debug_op_name, "\""));
}
}
}
return absl::OkStatus();
}
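// Example of ParseDebugOpName above:
//   "DebugNumericSummary(mute_if_healthy=true; threshold=300.0)"
// parses into debug_op_name_proper = "DebugNumericSummary" and attributes
// {"mute_if_healthy": "true", "threshold": "300.0"}; a bare name such as
// "DebugIdentity" is returned unchanged with no attributes.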
Status DebugNodeInserter::SetDebugNodeAttributes(
Node* debug_node, const std::unordered_map<string, string>& attributes) {
std::unordered_set<string> unfulfilled_keys;
for (const auto& item : attributes) {
unfulfilled_keys.insert(item.first);
}
for (const auto& attr : debug_node->op_def().attr()) {
if (attributes.find(attr.name()) != attributes.end()) {
const string& attr_value = attributes.at(attr.name());
if (attr.type() == "string") {
debug_node->AddAttr<string>(attr.name(), attr_value);
} else if (attr.type() == "float") {
float float_value = 0.0;
if (!::tensorflow::strings::safe_strtof(attr_value, &float_value)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid value string for float-type attribute ", attr.name(),
"of debug node ", debug_node->name(), ": \"", attr_value, "\""));
}
debug_node->AddAttr<float>(attr.name(), float_value);
} else if (attr.type() == "int") {
int64_t int_value = 0;
if (!::tensorflow::strings::safe_strto64(attr_value, &int_value)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid value string for int-type attribute ", attr.name(),
"of debug node ", debug_node->name(), ": \"", attr_value, "\""));
}
debug_node->AddAttr<int>(attr.name(), int_value);
} else if (attr.type() == "bool") {
bool bool_value;
if (!ParseBoolString(attr_value, &bool_value).ok()) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid value string for bool-type attribute ", attr.name(),
"of debug node ", debug_node->name(), ": \"", attr_value, "\""));
}
debug_node->AddAttr<bool>(attr.name(), bool_value);
} else {
return absl::InvalidArgumentError(
absl::StrCat("Unsupported type of custom attribute for debug ops: ",
attr.type()));
}
unfulfilled_keys.erase(attr.name());
}
}
if (unfulfilled_keys.empty()) {
return absl::OkStatus();
} else {
return absl::InvalidArgumentError(absl::StrCat(
unfulfilled_keys.size(),
" attribute key(s) were not valid for debug node ", debug_node->name(),
": ", absl::StrJoin(unfulfilled_keys, ", ")));
}
}
Status DebugNodeInserter::CreateDebugNode(
Graph* graph, const Device& device, const string& src_copy_node_name,
const DataType src_dt, const string& tensor_name,
const std::vector<string>& debug_urls, const int debug_op_num,
const string& debug_op_name, Node** debug_node) {
NodeDef node_def;
const KernelDef* kdef;
string debug_op_name_proper;
std::unordered_map<string, string> custom_attributes;
TF_RETURN_IF_ERROR(ParseDebugOpName(debug_op_name, &debug_op_name_proper,
&custom_attributes));
const string debug_node_name =
GetDebugNodeName(tensor_name, debug_op_num, debug_op_name_proper);
auto builder = NodeDefBuilder(debug_node_name, debug_op_name_proper)
.Input(src_copy_node_name, 0, src_dt)
.Attr("device_name", device.name())
.Attr("tensor_name", tensor_name)
.Attr("debug_urls", debug_urls);
if (!builder.Finalize(&node_def).ok()) {
return absl::FailedPreconditionError(
absl::StrCat("Failed to create node definition for debug op ",
debug_op_name_proper, " on watched tensor ", tensor_name));
}
if (!FindKernelDef(DeviceType(device.device_type()), node_def, &kdef, nullptr)
.ok()) {
return absl::FailedPreconditionError(
absl::StrCat("Failed to find kernel definition for debug op ",
debug_op_name_proper, " on watched tensor ", tensor_name));
}
if (!NodeBuilder(builder).Finalize(graph, debug_node).ok()) {
return absl::FailedPreconditionError(
absl::StrCat("Failed to create debug node ", debug_op_name_proper,
" on watched tensor ", tensor_name));
}
if (!custom_attributes.empty()) {
TF_RETURN_IF_ERROR(SetDebugNodeAttributes(*debug_node, custom_attributes));
}
return absl::OkStatus();
}
} | #include "tensorflow/core/debug/debug_graph_utils.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
class DebugGraphUtilsTest : public ::testing::Test {
protected:
Status ParseDebugOpName(const string& debug_op_name,
string* debug_op_name_proper,
std::unordered_map<string, string>* attributes) {
return DebugNodeInserter::ParseDebugOpName(
debug_op_name, debug_op_name_proper, attributes);
}
};
TEST_F(DebugGraphUtilsTest, TestParseNoAttributeDebugOpName) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
TF_ASSERT_OK(
ParseDebugOpName("DebugIdentity", &debug_op_name_proper, &attributes));
ASSERT_EQ("DebugIdentity", debug_op_name_proper);
ASSERT_EQ(0, attributes.size());
}
TEST_F(DebugGraphUtilsTest, TestMalformedDebugOpName) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
Status s = ParseDebugOpName("(mute_if_healthy=true)", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary)", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
}
TEST_F(DebugGraphUtilsTest, TestDebugOpNameWithMalformedAttributes) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
Status s = ParseDebugOpName("DebugNumericSummary(=)", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(mute_if_healthy=)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(=true)", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(mute_if_healthy:true)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(mute_if_healthy=true;threshold=)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName(
"DebugNumericSummary(mute_if_healthy=true;threshold:300.0)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
}
TEST_F(DebugGraphUtilsTest, TestValidDebugOpNameWithSingleAttribute) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
TF_ASSERT_OK(ParseDebugOpName("DebugNumericSummary()", &debug_op_name_proper,
&attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(0, attributes.size());
attributes.clear();
TF_ASSERT_OK(ParseDebugOpName("DebugNumericSummary(mute_if_healthy=true)",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(1, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
}
TEST_F(DebugGraphUtilsTest, TestValidDebugOpNameWithMoreThanOneAttributes) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
TF_ASSERT_OK(ParseDebugOpName(
"DebugNumericSummary(mute_if_healthy=true; threshold=300.0)",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(2, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
ASSERT_EQ("300.0", attributes["threshold"]);
attributes.clear();
TF_ASSERT_OK(ParseDebugOpName(
"DebugNumericSummary(mute_if_healthy=true;threshold=300.0;first_n=100)",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(3, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
ASSERT_EQ("300.0", attributes["threshold"]);
ASSERT_EQ("100", attributes["first_n"]);
}
TEST_F(DebugGraphUtilsTest, TestValidDebugOpNameWithMoreDuplicateAttributes) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
Status s = ParseDebugOpName(
"DebugNumericSummary(mute_if_healthy=true; lower_bound=3; "
"mute_if_healthy=false;)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
}
TEST_F(DebugGraphUtilsTest, TestValidDebugOpNameWithWhitespaceInAttributes) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
TF_ASSERT_OK(ParseDebugOpName(
"DebugNumericSummary( mute_if_healthy=true; threshold=300.0 )",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(2, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
ASSERT_EQ("300.0", attributes["threshold"]);
attributes.clear();
TF_ASSERT_OK(ParseDebugOpName(
"DebugNumericSummary(;;mute_if_healthy=true; threshold=300.0;;)",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(2, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
ASSERT_EQ("300.0", attributes["threshold"]);
}
} |
1,446 | cpp | tensorflow/tensorflow | utility | tensorflow/core/ir/utility.cc | tensorflow/core/ir/utility_test.cc | #ifndef TENSORFLOW_CORE_IR_UTILITY_H_
#define TENSORFLOW_CORE_IR_UTILITY_H_
#include <optional>
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
namespace mlir {
namespace tfg {
Block::BlockArgListType GetLoopRegionDataArgs(Region &region);
Block::BlockArgListType GetLoopRegionControlTokens(Region &region);
BlockArgument GetLoopRegionControlOf(BlockArgument data);
BlockArgument GetLoopRegionDataOf(BlockArgument ctl);
Value LookupControlDependency(Value data);
std::optional<Value> LookupDataValue(Value ctl);
template <typename RangeT>
std::pair<RangeT, RangeT> SplitDataAndControlValues(RangeT values,
ControlType ctl_type) {
unsigned num_ctl = 0;
for (Value value : llvm::reverse(values)) {
if (value.getType() == ctl_type)
++num_ctl;
else
break;
}
unsigned split_idx = llvm::size(values) - num_ctl;
return std::make_pair(values.slice(0, split_idx),
values.slice(split_idx, num_ctl));
}
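// Example: for an operand range (%a, %b, %ctl0, %ctl1) where the last two
// values have the TFG control type, SplitDataAndControlValues returns the
// pair ((%a, %b), (%ctl0, %ctl1)). Only a trailing run of control tokens is
// split off; control values interleaved with data are not reordered.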
}
}
#endif
#include "tensorflow/core/ir/utility.h"
#include <optional>
#include "mlir/IR/Block.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/interfaces.h"
#include "tensorflow/core/ir/types/dialect.h"
namespace mlir {
namespace tfg {
Block::BlockArgListType GetLoopRegionDataArgs(Region &region) {
Block::BlockArgListType args = region.getArguments();
return args.drop_back(args.size() / 2);
}
Block::BlockArgListType GetLoopRegionControlTokens(Region &region) {
Block::BlockArgListType args = region.getArguments();
return args.drop_front(args.size() / 2);
}
BlockArgument GetLoopRegionControlOf(BlockArgument data) {
Block &block = *data.getOwner();
return block.getArgument(data.getArgNumber() + block.getNumArguments() / 2);
}
BlockArgument GetLoopRegionDataOf(BlockArgument ctl) {
Block &block = *ctl.getOwner();
return block.getArgument(ctl.getArgNumber() - block.getNumArguments() / 2);
}
Value LookupControlDependency(Value data) {
assert(!mlir::isa<ControlType>(data.getType()) && "expected a data type");
Value control_dep;
if (auto result = mlir::dyn_cast<OpResult>(data)) {
control_dep = *std::prev(result.getOwner()->result_end());
} else {
auto arg = mlir::cast<BlockArgument>(data);
control_dep = cast<ControlArgumentInterface>(arg.getOwner()->getParentOp())
.getControlTokenOf(arg);
}
assert(mlir::isa<ControlType>(control_dep.getType()) &&
"expected a control type");
return control_dep;
}
std::optional<Value> LookupDataValue(Value ctl) {
assert(mlir::isa<ControlType>(ctl.getType()) && "expected a control type");
Value data;
if (auto result = mlir::dyn_cast<OpResult>(ctl)) {
if (result.getOwner()->getNumResults() == 1) return {};
data = *result.getOwner()->result_begin();
} else {
auto arg = mlir::cast<BlockArgument>(ctl);
data = cast<ControlArgumentInterface>(arg.getOwner()->getParentOp())
.getDataValueOf(arg);
}
assert(!mlir::isa<ControlType>(data.getType()) && "expected a data type");
return data;
}
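// Sketch of the pairing these helpers rely on: every TFG op's last result is
// its control token, and loop-region/function arguments come in matching
// (data, control) pairs. E.g. for `%Copy, %ctl = Copy(%arg)`,
// LookupControlDependency(%Copy) yields %ctl and LookupDataValue(%ctl) yields
// %Copy; for an op with only a control result (such as NoOp),
// LookupDataValue returns std::nullopt.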
}
} | #include "tensorflow/core/ir/utility.h"
#include <optional>
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
TEST(DialectUtilityTest, TestLookupControlDependency) {
MLIRContext context;
context.getOrLoadDialect<tfg::TFGraphDialect>();
const char *const code = R"mlir(
tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
%Copy, %ctl = Copy(%arg) : (tensor<i32>) -> (tensor<i32>)
return(%Copy) : tensor<i32>
}
)mlir";
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
auto ret_op = cast<ReturnOp>(func.getBody().front().getTerminator());
Value copy = ret_op.getOperand(0);
Value ctl = LookupControlDependency(copy);
ASSERT_TRUE(ctl);
OpResult ctl_result = mlir::dyn_cast<OpResult>(ctl);
ASSERT_TRUE(ctl_result);
EXPECT_EQ(ctl_result.getResultNumber(), 1);
EXPECT_EQ(copy, ctl_result.getOwner()->getResult(0));
EXPECT_EQ(ctl_result.getOwner()->getName().getStringRef(), "tfg.Copy");
Value arg = ctl_result.getOwner()->getOperand(0);
Value arg_ctl = LookupControlDependency(arg);
ASSERT_TRUE(arg_ctl);
BlockArgument ctl_arg = mlir::dyn_cast<BlockArgument>(arg_ctl);
ASSERT_TRUE(ctl_arg);
EXPECT_EQ(ctl_arg.getArgNumber(), 1);
EXPECT_EQ(arg, ctl_arg.getOwner()->getArgument(0));
}
TEST(DialectUtilityTest, TestLookupDataValue) {
MLIRContext context;
context.getOrLoadDialect<tfg::TFGraphDialect>();
const char *const code = R"mlir(
tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
%Produce, %ctl = Produce [%arg.ctl] : () -> (tensor<i32>)
return(%arg) [%ctl] : tensor<i32>
}
)mlir";
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
auto ret_op = cast<ReturnOp>(func.getBody().front().getTerminator());
Value ctl = ret_op.getOperand(1);
std::optional<Value> produce = LookupDataValue(ctl);
ASSERT_TRUE(produce);
OpResult produce_result = mlir::dyn_cast<OpResult>(*produce);
ASSERT_TRUE(produce_result);
ASSERT_EQ(produce_result.getResultNumber(), 0);
ASSERT_EQ(produce_result.getOwner()->getName().getStringRef(), "tfg.Produce");
ASSERT_EQ(produce_result.getOwner()->getResult(1), ctl);
Value arg_ctl = produce_result.getOwner()->getOperand(0);
std::optional<Value> arg = LookupDataValue(arg_ctl);
ASSERT_TRUE(arg);
BlockArgument arg_arg = mlir::dyn_cast<BlockArgument>(*arg);
ASSERT_TRUE(arg_arg);
ASSERT_EQ(arg_arg.getArgNumber(), 0);
ASSERT_EQ(arg_arg.getOwner()->getArgument(1), arg_ctl);
}
TEST(DialectUtilityTest, TestLookupDataValueNoData) {
MLIRContext context;
context.getOrLoadDialect<tfg::TFGraphDialect>();
const char *const code = R"mlir(
tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
%ctl = NoOp [%arg.ctl] : () -> ()
return(%arg) [%ctl] : tensor<i32>
}
)mlir";
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
auto ret_op = cast<ReturnOp>(func.getBody().front().getTerminator());
Value ctl = ret_op.getOperand(1);
std::optional<Value> no_data = LookupDataValue(ctl);
ASSERT_FALSE(no_data);
}
}
}
} |
1,447 | cpp | tensorflow/tensorflow | interfaces | tensorflow/core/ir/interfaces.cc | tensorflow/core/ir/interfaces_test.cc | #ifndef TENSORFLOW_CORE_IR_INTERFACES_H_
#define TENSORFLOW_CORE_IR_INTERFACES_H_
#include "mlir/IR/Dialect.h"
#include "mlir/IR/DialectInterface.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/interfaces.h.inc"
namespace mlir {
namespace tfg {
class TensorFlowRegistryInterfaceBase
: public TensorFlowRegistryInterface::FallbackModel<
TensorFlowRegistryInterfaceBase>,
public DialectInterface::Base<TensorFlowRegistryInterfaceBase> {
public:
explicit TensorFlowRegistryInterfaceBase(Dialect *dialect)
: DialectInterface::Base<TensorFlowRegistryInterfaceBase>(dialect) {}
virtual bool isStateful(Operation *op) const = 0;
};
class StatefulMemoryEffectInterface
: public MemoryEffectOpInterface::FallbackModel<
StatefulMemoryEffectInterface>,
public DialectInterface::Base<StatefulMemoryEffectInterface> {
public:
explicit StatefulMemoryEffectInterface(Dialect *dialect)
: DialectInterface::Base<StatefulMemoryEffectInterface>(dialect) {}
void getEffects(
Operation *op,
SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
&effects) const;
};
}
namespace OpTrait {
template <typename ConcreteType>
class IntrinsicOperation
: public mlir::OpTrait::TraitBase<ConcreteType, IntrinsicOperation> {};
}
}
#endif
#include "tensorflow/core/ir/interfaces.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Region.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/ir/types/dialect.h"
namespace mlir {
namespace tfg {
LogicalResult ControlArgumentInterface::verifyRegion(Operation *op,
Region ®ion) {
unsigned num_ctl = 0, num_data = 0;
for (BlockArgument arg : region.getArguments()) {
bool is_ctl = mlir::isa<tf_type::ControlType>(arg.getType());
num_ctl += is_ctl;
num_data += !is_ctl;
}
if (num_ctl != num_data) {
return op->emitOpError("region #")
<< region.getRegionNumber()
<< " expected same number of data values and control tokens ("
<< num_data << " vs. " << num_ctl << ")";
}
return success();
}
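// Ops that are unregistered, registered as stateful, or nested inside a graph
// region are conservatively modeled as having a memory write effect.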
void StatefulMemoryEffectInterface::getEffects(
Operation *op,
SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
&effects) const {
auto registry = dyn_cast<TensorFlowRegistryInterface>(op);
if (!registry || registry.isStateful() || op->getParentOfType<GraphOp>()) {
effects.emplace_back(MemoryEffects::Write::get());
}
}
}
}
#include "tensorflow/core/ir/interfaces.cc.inc" | #include "tensorflow/core/ir/interfaces.h"
#include "llvm/ADT/ScopeExit.h"
#include "mlir/IR/DialectInterface.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/Verifier.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
TEST(TensorFlowRegistryInterface, TestDefaultImplementation) {
MLIRContext context(MLIRContext::Threading::DISABLED);
auto *dialect = context.getOrLoadDialect<TFGraphDialect>();
OperationState state(UnknownLoc::get(&context), "tfg.Foo");
state.addTypes(dialect->getControlType());
Operation *op = Operation::create(state);
auto cleanup = llvm::make_scope_exit([&] { op->destroy(); });
ASSERT_TRUE(succeeded(verify(op)));
auto iface = dyn_cast<TensorFlowRegistryInterface>(op);
EXPECT_FALSE(iface);
}
TEST(TensorFlowRegisterInterface, TestCustomImplementation) {
MLIRContext context(MLIRContext::Threading::DISABLED);
DialectRegistry registry;
registry.insert<TFGraphDialect>();
struct CustomRegistryInterface : public TensorFlowRegistryInterfaceBase {
using TensorFlowRegistryInterfaceBase::TensorFlowRegistryInterfaceBase;
bool isStateful(Operation *op) const override {
return op->getName().stripDialect() == "Foo";
}
};
registry.addExtension(+[](mlir::MLIRContext *ctx, TFGraphDialect *dialect) {
dialect->addInterfaces<CustomRegistryInterface>();
});
context.appendDialectRegistry(registry);
auto *dialect = context.getOrLoadDialect<TFGraphDialect>();
SmallVector<StringRef, 2> op_names = {"tfg.Foo", "tfg.Bar"};
SmallVector<bool, 2> expected = {true, false};
for (auto it : llvm::zip(op_names, expected)) {
OperationState state(UnknownLoc::get(&context), std::get<0>(it));
state.addTypes(dialect->getControlType());
Operation *op = Operation::create(state);
auto cleanup = llvm::make_scope_exit([&] { op->destroy(); });
auto iface = dyn_cast<TensorFlowRegistryInterface>(op);
ASSERT_TRUE(iface);
EXPECT_EQ(iface.isStateful(), std::get<1>(it));
}
}
}
}
} |
1,448 | cpp | tensorflow/tensorflow | tf_op_wrapper | tensorflow/core/ir/tf_op_wrapper.cc | tensorflow/core/ir/tf_op_wrapper_test.cc | #ifndef TENSORFLOW_CORE_IR_TF_OP_WRAPPER_H_
#define TENSORFLOW_CORE_IR_TF_OP_WRAPPER_H_
#include <cstddef>
#include "llvm/ADT/iterator_range.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/TypeRange.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/types/dialect.h"
#include "tensorflow/core/ir/utility.h"
namespace mlir {
namespace detail {
template <typename ValueIteratorT>
class ControlRetIterator final
: public llvm::mapped_iterator_base<ControlRetIterator<ValueIteratorT>,
ValueIteratorT, Value> {
public:
using llvm::mapped_iterator_base<ControlRetIterator<ValueIteratorT>,
ValueIteratorT, Value>::mapped_iterator_base;
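  // Maps each value to a control token: control values pass through unchanged,
  // data values resolve to their defining op's control result.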
Value mapElement(Value value) const {
return mlir::isa<tf_type::ControlType>(value.getType())
? value
: tfg::LookupControlDependency(value);
}
};
}
namespace tfg {
class TFOp {
public:
TFOp(Operation *op = nullptr);
explicit TFOp(Operation &op) : TFOp(&op) {}
static bool classof(Operation *op) {
return isa<TFGraphDialect>(op->getDialect());
}
Operation *getOperation() { return op_; }
TFGraphDialect *getDialect() {
return cast<TFGraphDialect>(op_->getDialect());
}
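  // TFG operands are ordered as [data operands..., control tokens...]; split
  // off the trailing control tokens.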
std::pair<OperandRange, OperandRange> splitOperands() {
ControlType ctl_type = getDialect()->getControlType();
return SplitDataAndControlValues(op_->getOperands(), ctl_type);
}
OperandRange getNonControlOperands() { return splitOperands().first; }
OperandRange getControlOperands() { return splitOperands().second; }
Value controlRet() { return op_->getResult(op_->getNumResults() - 1); }
ResultRange getNonControlResults() {
return op_->getResults().slice(0, op_->getNumResults() - 1);
}
StringAttr nameAttr();
StringRef name();
void setName(const Twine &name);
void setName(StringAttr name);
StringAttr requestedDeviceAttr();
StringRef requestedDevice();
void setRequestedDevice(const Twine &requested_device);
void setRequestedDevice(StringAttr requested_device);
StringAttr assignedDeviceAttr();
StringRef assignedDevice();
void setAssignedDevice(const Twine &assigned_device);
void setAssignedDevice(StringAttr assigned_device);
StringAttr tpuReplicate();
void setTpuReplicate(StringAttr tpu_replicate);
StringAttr deviceAttr() {
StringAttr device = assignedDeviceAttr();
if (device) {
assert(!device.getValue().empty());
return device;
}
return requestedDeviceAttr();
}
StringRef device() {
StringAttr device_attr = deviceAttr();
if (device_attr) return device_attr.getValue();
return "";
}
Operation *operator->() { return op_; }
Operation &operator*() { return *op_; }
explicit operator bool() const { return op_; }
private:
Operation *op_;
};
template <typename ValueRangeT>
class ControlRetRange final
: public llvm::iterator_range<
::mlir::detail::ControlRetIterator<typename ValueRangeT::iterator>> {
public:
using Base = llvm::iterator_range<
::mlir::detail::ControlRetIterator<typename ValueRangeT::iterator>>;
explicit ControlRetRange(ValueRangeT c) : Base(c.begin(), c.end()) {}
Value operator[](size_t index) const {
assert(index < size() && "invalid index into value range");
return *(this->begin() + index);
}
size_t size() const { return llvm::size(*this); }
Value front() { return (*this)[0]; }
template <typename OtherT>
bool operator==(const OtherT &other) const {
return llvm::size(*this) == llvm::size(other) &&
std::equal(this->begin(), this->end(), other.begin());
}
template <typename OtherT>
bool operator!=(const OtherT &other) const {
return !(*this == other);
}
};
using OperandControlRetRange = ControlRetRange<OperandRange>;
using ValueControlRetRange = ControlRetRange<ValueRange>;
}
}
#endif
#include "tensorflow/core/ir/tf_op_wrapper.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "tensorflow/core/ir/dialect.h"
namespace mlir {
namespace tfg {
TFOp::TFOp(Operation *op) : op_(op) {
  assert(!op || (classof(op) && "expected a TFG op"));
}
StringAttr TFOp::nameAttr() {
return op_->getAttrOfType<StringAttr>(getDialect()->getNameAttrIdentifier());
}
StringRef TFOp::name() { return nameAttr().getValue(); }
void TFOp::setName(const Twine &name) {
setName(StringAttr::get(op_->getContext(), name.str()));
}
void TFOp::setName(StringAttr name) {
op_->setAttr(getDialect()->getNameAttrIdentifier(), name);
}
StringAttr TFOp::requestedDeviceAttr() {
return op_->getAttrOfType<StringAttr>(
getDialect()->getDeviceAttrIdentifier());
}
StringRef TFOp::requestedDevice() { return requestedDeviceAttr().getValue(); }
void TFOp::setRequestedDevice(const Twine &device) {
setRequestedDevice(StringAttr::get(op_->getContext(), device.str()));
}
void TFOp::setRequestedDevice(StringAttr device) {
op_->setAttr(getDialect()->getDeviceAttrIdentifier(), device);
}
StringAttr TFOp::assignedDeviceAttr() {
return op_->getAttrOfType<StringAttr>(
getDialect()->getAssignedDeviceAttrIdentifier());
}
StringRef TFOp::assignedDevice() { return assignedDeviceAttr().getValue(); }
void TFOp::setAssignedDevice(const Twine &device) {
setAssignedDevice(StringAttr::get(op_->getContext(), device.str()));
}
void TFOp::setAssignedDevice(StringAttr device) {
op_->setAttr(getDialect()->getAssignedDeviceAttrIdentifier(), device);
}
StringAttr TFOp::tpuReplicate() {
return op_->getAttrOfType<StringAttr>("_tpu_replicate");
}
void TFOp::setTpuReplicate(StringAttr tpu_replicate) {
op_->setAttr("_tpu_replicate", tpu_replicate);
}
}
} | #include "tensorflow/core/ir/tf_op_wrapper.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
TEST(TFOpWrapper, LLVMRTTI) {
const char *const code = R"mlir(
tfg.func @test() -> (tensor<i32>) {
%A, %ctlA = A : () -> (tensor<i32>)
return(%A) : tensor<i32>
}
)mlir";
MLIRContext context;
context.getOrLoadDialect<TFGraphDialect>();
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
Operation *module_op = module.get();
EXPECT_FALSE(isa<TFOp>(module_op));
EXPECT_FALSE(dyn_cast<TFOp>(module_op));
module->walk([&](TFOp op) {
EXPECT_TRUE(isa<TFOp>(op.getOperation()));
EXPECT_TRUE(dyn_cast<TFOp>(op.getOperation()));
});
}
TEST(TFOpWrapper, ControlOperands) {
const char *const code = R"mlir(
tfg.func @test(%a: tensor<i32> {tfg.name = "a"},
%b: tensor<i32> {tfg.name = "b"}) -> (tensor<i32>) {
%A, %ctlA = A(%a, %b) [%a.ctl, %b.ctl] : (tensor<i32>, tensor<i32>)
-> (tensor<i32>)
return(%A) : tensor<i32>
}
)mlir";
MLIRContext context;
context.getOrLoadDialect<TFGraphDialect>();
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
TFOp a_op;
module->walk([&](TFOp op) {
if (op->getName().getStringRef() == "tfg.A") a_op = op;
});
ASSERT_TRUE(a_op);
EXPECT_EQ(a_op.controlRet().getDefiningOp(), a_op.getOperation());
OperandRange operands = a_op->getOperands();
OperandRange data = a_op.getNonControlOperands();
OperandRange ctls = a_op.getControlOperands();
EXPECT_EQ(operands.size(), 4u);
EXPECT_EQ(data.size(), 2u);
EXPECT_EQ(ctls.size(), 2u);
OperandRange::iterator ctl_it = llvm::find_if(operands, [](Value operand) {
return mlir::isa<ControlType>(operand.getType());
});
EXPECT_NE(ctl_it, operands.end());
EXPECT_EQ(data.end(), ctl_it);
EXPECT_EQ(*ctls.begin(), *ctl_it);
}
TEST(TFOpWrapper, AttributeGetterSetters) {
MLIRContext context;
auto *tfg_dialect = context.getOrLoadDialect<TFGraphDialect>();
OperationState state(UnknownLoc::get(&context), "tfg.A");
state.addTypes(tfg_dialect->getControlType());
TFOp op = Operation::create(state);
auto cleanup = llvm::make_scope_exit([&] { op->destroy(); });
{
EXPECT_FALSE(op.nameAttr());
StringRef a_name = "a_name";
op.setName(a_name);
EXPECT_EQ(op.name(), a_name);
StringRef another_name = "another_name";
op.setName(StringAttr::get(&context, another_name));
EXPECT_EQ(op.name(), another_name);
}
{
StringRef a_device = "/some_device";
EXPECT_FALSE(op.requestedDeviceAttr());
op.setRequestedDevice(a_device);
EXPECT_EQ(op.requestedDevice(), a_device);
StringRef another_device = "/some_other_device";
op.setRequestedDevice(StringAttr::get(&context, another_device));
EXPECT_EQ(op.requestedDevice(), another_device);
}
{
StringRef a_device = "/some_assigned_device";
EXPECT_FALSE(op.assignedDeviceAttr());
op.setAssignedDevice(a_device);
EXPECT_EQ(op.assignedDevice(), a_device);
StringRef another_device = "/some_other_assigned_device";
op.setAssignedDevice(StringAttr::get(&context, another_device));
EXPECT_EQ(op.assignedDevice(), another_device);
}
{
op->removeAttr(tfg_dialect->getAssignedDeviceAttrIdentifier());
EXPECT_EQ(op.deviceAttr(), op.requestedDeviceAttr());
StringRef device = "/an_assigned_device";
op.setAssignedDevice(device);
EXPECT_EQ(op.deviceAttr(), op.assignedDeviceAttr());
EXPECT_EQ(op.device(), device);
op->removeAttr(tfg_dialect->getAssignedDeviceAttrIdentifier());
op->removeAttr(tfg_dialect->getDeviceAttrIdentifier());
EXPECT_EQ(op.device(), "");
}
{
auto tpu_replicate = StringAttr::get(op->getContext(), "a_tpu");
op.setTpuReplicate(tpu_replicate);
EXPECT_EQ(op.tpuReplicate(), tpu_replicate);
}
}
TEST(TFOpWrapper, ValueControlRet) {
const char *const code = R"mlir(
tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
%Const, %ctl = Const {dtype = i32, value = dense<0> : tensor<i32>} : () -> (tensor<i32>)
%Add, %ctl_2 = Add(%Const, %arg) [%ctl] {T = i32} : (tensor<i32>, tensor<i32>) -> (tensor<i32>)
return(%Add) : tensor<i32>
}
)mlir";
MLIRContext context;
context.getOrLoadDialect<TFGraphDialect>();
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
auto iterator = func.getBody().begin()->begin();
TFOp const_op = &(*iterator++);
TFOp add_op = &(*iterator);
OperandControlRetRange ret_range(add_op->getOperands());
EXPECT_EQ(ret_range[0], const_op.controlRet());
EXPECT_EQ(ret_range[1], func.getBody().begin()->getArguments()[1]);
EXPECT_EQ(ret_range[2], const_op.controlRet());
for (Value v : ret_range) EXPECT_TRUE(mlir::isa<ControlType>(v.getType()));
}
}
}
} |
1,449 | cpp | tensorflow/tensorflow | tf_op_registry | tensorflow/core/ir/tf_op_registry.cc | tensorflow/core/ir/tf_op_registry_test.cc | #ifndef TENSORFLOW_CORE_IR_TF_OP_REGISTRY_H_
#define TENSORFLOW_CORE_IR_TF_OP_REGISTRY_H_
#include "tensorflow/core/ir/interfaces.h"
namespace tensorflow {
class OpRegistry;
}
namespace mlir {
namespace tfg {
class TensorFlowOpRegistryInterface : public TensorFlowRegistryInterfaceBase {
public:
TensorFlowOpRegistryInterface(Dialect *dialect,
const tensorflow::OpRegistry *registry)
: TensorFlowRegistryInterfaceBase(dialect), registry_(registry) {}
explicit TensorFlowOpRegistryInterface(Dialect *dialect);
bool isStateful(Operation *op) const override;
const tensorflow::OpRegistry *GetRegistry() const { return registry_; }
private:
const tensorflow::OpRegistry *registry_;
};
}
}
#endif
#include "tensorflow/core/ir/tf_op_registry.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/ir/ops.h"
namespace mlir {
namespace tfg {
TensorFlowOpRegistryInterface::TensorFlowOpRegistryInterface(Dialect *dialect)
: TensorFlowOpRegistryInterface(dialect, tensorflow::OpRegistry::Global()) {
}
static bool IsStatefulImpl(const tensorflow::OpRegistry *registry,
StringRef op_name) {
const tensorflow::OpRegistrationData *op_reg_data =
registry->LookUp(op_name.str());
if (!op_reg_data) return true;
return op_reg_data->op_def.is_stateful();
}
bool TensorFlowOpRegistryInterface::isStateful(Operation *op) const {
if (op->hasTrait<OpTrait::IntrinsicOperation>()) return false;
if (auto func = dyn_cast<GraphFuncOp>(op)) return func.getIsStateful();
StringRef op_name = op->getName().stripDialect();
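  // Region-suffixed control-flow ops (e.g. CaseRegion, WhileRegion) map to
  // registry entries without the suffix, so drop the trailing "Region".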
if (op->getNumRegions() && op_name.ends_with("Region"))
op_name = op_name.drop_back(6);
return IsStatefulImpl(registry_, op_name);
}
}
} | #include "tensorflow/core/ir/tf_op_registry.h"
#include <string>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/interfaces.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
void PrepareContext(MLIRContext *context) {
DialectRegistry registry;
registry.insert<TFGraphDialect>();
registry.addExtension(+[](mlir::MLIRContext *ctx, TFGraphDialect *dialect) {
dialect->addInterfaces<TensorFlowOpRegistryInterface>();
});
context->appendDialectRegistry(registry);
}
TEST(TensorFlowOpRegistryInterface, TestIntrinsicOps) {
MLIRContext context(MLIRContext::Threading::DISABLED);
PrepareContext(&context);
const char *const code = R"mlir(
tfg.func @test(%arg: tensor<i32>) -> (tensor<i32>) {
return(%arg) : tensor<i32>
}
)mlir";
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
auto func_op = cast<GraphFuncOp>(&module->front());
auto ret_op = cast<ReturnOp>(func_op.getBody().front().getTerminator());
EXPECT_FALSE(dyn_cast<TensorFlowRegistryInterface>(*func_op));
EXPECT_FALSE(dyn_cast<TensorFlowRegistryInterface>(*ret_op));
}
TEST(TensorFlowOpRegistryInterface, TestStatelessTFOps) {
MLIRContext context(MLIRContext::Threading::DISABLED);
PrepareContext(&context);
const char *const code = R"mlir(
tfg.func @test(%lhs: tensor<i32>, %rhs: tensor<i32>) -> (tensor<i32>) {
%Add, %ctl = Add(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> (tensor<i32>)
return(%Add) : tensor<i32>
}
)mlir";
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
Operation *add =
&cast<GraphFuncOp>(&module->front()).getBody().front().front();
auto iface = dyn_cast<TensorFlowRegistryInterface>(add);
ASSERT_TRUE(iface);
EXPECT_FALSE(iface.isStateful());
}
TEST(TensorFlowOpRegistryInterface, TestStatelessAndStatefulRegionOps) {
MLIRContext context(MLIRContext::Threading::DISABLED);
PrepareContext(&context);
const char *const code_template = R"mlir(
tfg.func @test(%idx: tensor<i32>, %arg: tensor<i32>) -> (tensor<i32>) {{
%Case, %ctl = {0}CaseRegion %idx {{
yield(%arg) : tensor<i32>
} : (tensor<i32>) -> (tensor<i32>)
return(%Case) : tensor<i32>
}
)mlir";
SmallVector<StringRef, 2> prefixes = {"", "Stateless"};
SmallVector<bool, 2> expected = {true, false};
for (auto it : llvm::zip(prefixes, expected)) {
std::string code = llvm::formatv(code_template, std::get<0>(it)).str();
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
Operation *case_op =
&cast<GraphFuncOp>(&module->front()).getBody().front().front();
auto iface = dyn_cast<TensorFlowRegistryInterface>(case_op);
ASSERT_TRUE(iface);
EXPECT_EQ(iface.isStateful(), std::get<1>(it));
}
}
}
}
} |
1,450 | cpp | tensorflow/tensorflow | dialect | tensorflow/core/ir/types/dialect.cc | tensorflow/core/ir/types/dialect_test.cc | #ifndef TENSORFLOW_CORE_IR_TYPES_DIALECT_H_
#define TENSORFLOW_CORE_IR_TYPES_DIALECT_H_
#include <optional>
#include <string>
#include "mlir/Dialect/Quant/QuantTypes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/types/dialect.h.inc"
#define GET_TYPEDEF_CLASSES
#include "tensorflow/core/ir/types/types.h.inc"
namespace mlir {
namespace tf_type {
class TensorFlowType : public Type {
public:
using Type::Type;
static bool classof(Type type);
};
inline bool IsValidTFElementType(Type type) {
return mlir::isa<ComplexType, FloatType, IntegerType, TensorFlowType,
quant::QuantizedType>(type);
}
inline bool IsValidTFTensorType(Type type) {
if (auto tensor_ty = mlir::dyn_cast<TensorType>(type))
return IsValidTFElementType(tensor_ty.getElementType());
return false;
}
namespace detail {
template <typename Derived>
class TensorFlowTypeImpl
: public Type::TypeBase<Derived, TensorFlowType, TypeStorage> {
public:
using Base = typename Type::TypeBase<Derived, TensorFlowType, TypeStorage>;
using TFBase = TensorFlowTypeImpl<Derived>;
using Base::Base;
};
}
class TensorFlowRefType : public TensorFlowType {
public:
using TensorFlowType::TensorFlowType;
static bool classof(Type type);
static TensorFlowType get(Type type);
static TensorFlowType getChecked(Type type, MLIRContext* context,
Location loc) {
if (failed(verify(loc, type))) {
return TensorFlowRefType();
}
return get(type);
}
static LogicalResult verify(Location loc, Type type) {
if (!IsValidTFTensorType(type)) {
return emitError(loc) << "invalid TensorFlow type: " << type;
}
return success();
}
Type RemoveRef();
};
#define HANDLE_TF_TYPE(tftype, enumerant, name_marg) \
class tftype##Type : public detail::TensorFlowTypeImpl<tftype##Type> { \
public: \
using TFBase::TFBase; \
static constexpr StringLiteral name = #name_marg; \
};
#define HANDLE_CUSTOM_TF_TYPE(tftype, enumerant, name_marg)
#include "tensorflow/core/ir/types/types.def"
namespace detail {
class TypeWithSubtypeStorage : public TypeStorage {
public:
using KeyTy = ArrayRef<TensorType>;
static TypeWithSubtypeStorage* construct(TypeStorageAllocator& allocator,
const KeyTy& key) {
ArrayRef<TensorType> subtypes = allocator.copyInto(key);
return new (allocator.allocate<TypeWithSubtypeStorage>())
TypeWithSubtypeStorage(subtypes);
}
explicit TypeWithSubtypeStorage(const KeyTy& key) : subtypes_(key) {}
bool operator==(const KeyTy& key) const { return key == subtypes_; }
static llvm::hash_code hashKey(const KeyTy& key) {
return llvm::hash_combine_range(key.begin(), key.end());
}
KeyTy subtypes_;
};
template <typename Derived>
class TypeWithSubtypeImpl
: public Type::TypeBase<Derived, TensorFlowType, TypeWithSubtypeStorage> {
public:
using Base = Type::TypeBase<Derived, TensorFlowType, TypeWithSubtypeStorage>;
using TFBase = TypeWithSubtypeImpl<Derived>;
using Base::Base;
static Derived get(ArrayRef<TensorType> subtypes, MLIRContext* context) {
return Base::get(context, subtypes);
}
static Derived getChecked(ArrayRef<TensorType> subtypes, MLIRContext* context,
Location loc) {
return Base::getChecked(loc, subtypes);
}
static Derived getChecked(function_ref<InFlightDiagnostic()> emitError,
MLIRContext* context,
ArrayRef<TensorType> subtypes) {
return Base::getChecked(emitError, context, subtypes);
}
static Derived get(MLIRContext* context) { return get({}, context); }
static LogicalResult verify(function_ref<InFlightDiagnostic()> emitError,
ArrayRef<TensorType> subtypes) {
for (TensorType subtype : subtypes) {
if (!IsValidTFTensorType(subtype)) {
return emitError() << "invalid " << Derived::getTypeName()
<< " subtype: " << subtype;
}
}
return success();
}
ArrayRef<TensorType> getSubtypes() { return Base::getImpl()->subtypes_; }
};
}
class TensorFlowTypeWithSubtype : public TensorFlowType {
public:
using TensorFlowType::TensorFlowType;
static bool classof(Type type);
Type RemoveSubtypes();
TensorFlowTypeWithSubtype clone(ArrayRef<TensorType> new_subtypes);
ArrayRef<TensorType> GetSubtypes();
};
inline Type GetDefaultTypeOf(TensorFlowTypeWithSubtype type) {
return type.RemoveSubtypes();
}
class ResourceType : public detail::TypeWithSubtypeImpl<ResourceType> {
public:
using TFBase::TFBase;
static constexpr ::mlir::StringLiteral name = "tf_type.resource";
static std::string getTypeName() { return "ResourceType"; }
};
class VariantType : public detail::TypeWithSubtypeImpl<VariantType> {
public:
using TFBase::TFBase;
static constexpr ::mlir::StringLiteral name = "tf_type.variant";
static std::string getTypeName() { return "VariantType"; }
};
Type GetCastCompatibleType(Type a, Type b, bool may_ignore_ref_type_a = false);
bool BroadcastCompatible(TypeRange lhs, TypeRange rhs);
bool HasCompatibleElementTypes(Type lhs, Type rhs,
bool may_ignore_ref_type_lhs = false);
bool AreCastCompatible(TypeRange types);
bool ArraysAreCastCompatible(TypeRange lhs, TypeRange rhs);
Type DropSubTypes(Type ty);
Type DropRefType(Type ty);
Type DropRefAndSubTypes(Type ty);
class OperandShapeIterator final
: public llvm::mapped_iterator<Operation::operand_iterator,
std::optional<ArrayRef<int64_t>> (*)(
Value)> {
public:
using reference = std::optional<ArrayRef<int64_t>>;
explicit OperandShapeIterator(Operation::operand_iterator it);
};
using OperandShapeRange = iterator_range<OperandShapeIterator>;
class ResultShapeIterator final
: public llvm::mapped_iterator<Operation::result_iterator,
std::optional<ArrayRef<int64_t>> (*)(
Value)> {
public:
using reference = std::optional<ArrayRef<int64_t>>;
explicit ResultShapeIterator(Operation::result_iterator it);
};
using ResultShapeRange = iterator_range<ResultShapeIterator>;
template <typename RangeT>
auto filter_resources(RangeT&& range) {
return llvm::make_filter_range(std::forward<RangeT>(range), [](Value val) {
return mlir::isa<ResourceType>(getElementTypeOrSelf(val.getType()));
});
}
inline Type GetElementTypeOrSelfResolveRef(Type type) {
Type element_type = getElementTypeOrSelf(type);
if (auto ref_type = mlir::dyn_cast<TensorFlowRefType>(element_type)) {
element_type = ref_type.RemoveRef();
}
return element_type;
}
}
}
#define GET_ATTRDEF_CLASSES
#include "tensorflow/core/ir/types/attributes.h.inc"
#include "tensorflow/core/ir/types/attributes_enum.h.inc"
#endif
#include "tensorflow/core/ir/types/dialect.h"
#include <cstdint>
#include <optional>
#include <string>
#include "absl/strings/escaping.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Traits.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/DialectImplementation.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#define GET_ATTRDEF_CLASSES
#include "tensorflow/core/ir/types/attributes.cc.inc"
#include "tensorflow/core/ir/types/attributes_enum.cc.inc"
#define GET_TYPEDEF_CLASSES
#include "tensorflow/core/ir/types/types.cc.inc"
#include "tensorflow/core/ir/types/dialect.cpp.inc"
namespace mlir {
namespace tf_type {
void TFTypeDialect::initialize() {
addAttributes<
#define GET_ATTRDEF_LIST
#include "tensorflow/core/ir/types/attributes.cc.inc"
>();
addTypes<ControlType, OpaqueTensorType,
#define HANDLE_TF_TYPE(tftype, enumerant, name) tftype##Type,
#define HANDLE_LAST_TF_TYPE(tftype, enumerant, name) tftype##Type
#include "tensorflow/core/ir/types/types.def"
>();
}
namespace {
template <typename TypeWithSubtype>
Type ParseTypeWithSubtype(MLIRContext* context, DialectAsmParser& parser) {
if (failed(parser.parseOptionalLess())) return TypeWithSubtype::get(context);
SmallVector<TensorType, 1> subtypes;
do {
TensorType tensor_ty;
if (parser.parseType(tensor_ty)) return Type();
if (!IsValidTFTensorType(tensor_ty)) {
parser.emitError(parser.getNameLoc()) << "invalid subtype: " << tensor_ty;
return Type();
}
subtypes.push_back(tensor_ty);
} while (succeeded(parser.parseOptionalComma()));
if (parser.parseGreater()) return Type();
return TypeWithSubtype::get(subtypes, context);
}
template <typename TypeWithSubtype>
void PrintTypeWithSubtype(StringRef type, TypeWithSubtype ty,
DialectAsmPrinter& os) {
os << type;
ArrayRef<TensorType> subtypes = ty.getSubtypes();
if (subtypes.empty()) return;
os << "<";
interleaveComma(subtypes, os);
os << ">";
}
Type ParseResourceType(MLIRContext* context, DialectAsmParser& parser) {
return ParseTypeWithSubtype<ResourceType>(context, parser);
}
void PrintResourceType(ResourceType ty, DialectAsmPrinter& os) {
return PrintTypeWithSubtype("resource", ty, os);
}
Type ParseVariantType(MLIRContext* context, DialectAsmParser& parser) {
return ParseTypeWithSubtype<VariantType>(context, parser);
}
void PrintVariantType(VariantType ty, DialectAsmPrinter& os) {
return PrintTypeWithSubtype("variant", ty, os);
}
}
Type TFTypeDialect::parseType(DialectAsmParser& parser) const {
StringRef type_tag;
llvm::SMLoc loc = parser.getNameLoc();
Type genType;
auto parse_result = generatedTypeParser(parser, &type_tag, genType);
if (parse_result.has_value()) return genType;
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (type_tag == name) return tftype##Type::get(getContext());
#define HANDLE_CUSTOM_TF_TYPE(tftype, enumerant, name)
#include "tensorflow/core/ir/types/types.def"
if (type_tag.starts_with("resource")) {
Type ret = ParseResourceType(getContext(), parser);
if (!ret) parser.emitError(loc, "invalid resource type");
return ret;
}
if (type_tag.starts_with("variant")) {
Type ret = ParseVariantType(getContext(), parser);
if (!ret) parser.emitError(loc, "invalid variant type");
return ret;
}
parser.emitError(parser.getNameLoc(),
"unknown type in TF graph dialect: " + type_tag);
return {};
}
void TFTypeDialect::printType(Type type, DialectAsmPrinter& printer) const {
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (auto derived_ty = type.dyn_cast<tftype##Type>()) { \
printer << name; \
return; \
}
#define HANDLE_CUSTOM_TF_TYPE(tftype, enumerant, name) \
if (auto derived_ty = type.dyn_cast<tftype##Type>()) { \
Print##tftype##Type(derived_ty, printer); \
return; \
}
#include "tensorflow/core/ir/types/types.def"
if (failed(generatedTypePrinter(type, printer)))
llvm::report_fatal_error("unexpected tensorflow graph type kind");
}
Attribute VersionAttr::parse(AsmParser& parser, Type) {
if (failed(parser.parseLess())) return {};
int32_t producer, min_consumer;
if (parser.parseKeyword("producer", " in tf_type version") ||
parser.parseEqual() || parser.parseInteger(producer) ||
parser.parseComma() ||
parser.parseKeyword("min_consumer", " in tf_type version") ||
parser.parseEqual() || parser.parseInteger(min_consumer))
return {};
SmallVector<int32_t, 4> bad_consumers;
if (!parser.parseOptionalComma()) {
if (parser.parseKeyword("bad_consumers", " in tf_type version") ||
parser.parseEqual() || parser.parseLSquare())
return {};
do {
int32_t bad_consumer;
if (parser.parseInteger(bad_consumer)) return {};
bad_consumers.push_back(bad_consumer);
} while (!parser.parseOptionalComma());
if (parser.parseRSquare()) return {};
}
if (failed(parser.parseGreater())) return {};
return VersionAttr::get(parser.getContext(), producer, min_consumer,
bad_consumers);
}
void VersionAttr::print(AsmPrinter& printer) const {
llvm::raw_ostream& os = printer.getStream();
os << "<producer = " << getProducer()
<< ", min_consumer = " << getMinConsumer();
ArrayRef<int32_t> badConsumers = getBadConsumers();
if (!badConsumers.empty()) {
os << ", bad_consumers = [";
llvm::interleaveComma(badConsumers, os);
os << "]";
}
os << ">";
}
FailureOr<FullTypeAttr> RawFullTypeAttrParser(AsmParser& parser) {
SmallVector<FullTypeAttr> args;
llvm::StringRef type_id_str;
if (failed(parser.parseKeyword(&type_id_str))) {
parser.emitError(
parser.getCurrentLocation(),
"failed to parse TFType_FullTypeAttr parameter keyword for "
"'type_id'");
return failure();
}
std::optional<FullTypeId> type_id = symbolizeFullTypeId(type_id_str);
if (!type_id) {
parser.emitError(parser.getCurrentLocation(),
"failed to parse TFType_FullTypeAttr parameter "
"'type_id'");
return failure();
}
if (parser.parseCommaSeparatedList(AsmParser::Delimiter::OptionalLessGreater,
[&]() {
FailureOr<tf_type::FullTypeAttr> arg =
RawFullTypeAttrParser(parser);
if (failed(arg)) return failure();
args.push_back(*arg);
return success();
}))
return failure();
Attribute attr;
parser.parseOptionalAttribute(attr);
return FullTypeAttr::get(parser.getContext(), static_cast<int32_t>(*type_id),
args, attr);
}
Attribute FullTypeAttr::parse(AsmParser& parser, Type odsType) {
if (failed(parser.parseLess())) return {};
FailureOr<tf_type::FullTypeAttr> ret = RawFullTypeAttrParser(parser);
if (succeeded(ret) && failed(parser.parseGreater())) return {};
return ret.value_or(FullTypeAttr());
}
static void RawFullTypeAttrPrint(FullTypeAttr tfattr, AsmPrinter& printer) {
printer << stringifyFullTypeId(tf_type::FullTypeId(tfattr.getTypeId()));
if (!tfattr.getArgs().empty()) {
printer << "<";
llvm::interleaveComma(tfattr.getArgs(), printer, [&](Attribute arg) {
if (auto t = mlir::dyn_cast<FullTypeAttr>(arg))
RawFullTypeAttrPrint(t, printer);
else
printer << "<<INVALID ARG>>";
});
printer << ">";
}
if (tfattr.getAttr()) {
printer << ' ';
printer.printStrippedAttrOrType(tfattr.getAttr());
}
}
void FullTypeAttr::print(AsmPrinter& printer) const {
printer << "<";
RawFullTypeAttrPrint(*this, printer);
printer << ">";
}
void FuncAttr::print(AsmPrinter& os) const {
if (getName().getRootReference().getValue().empty())
os << "<\"\", " << getAttrs() << ">";
else
os << "<" << getName() << ", " << getAttrs() << ">";
}
Attribute FuncAttr::parse(AsmParser& parser, Type type) {
if (failed(parser.parseLess())) return {};
llvm::SMLoc loc = parser.getCurrentLocation();
Attribute name, dict;
if (failed(parser.parseAttribute(name))) {
parser.emitError(loc) << "expected symbol while parsing tf.func attribute";
return {};
}
if (auto func_name_str = mlir::dyn_cast<StringAttr>(name)) {
if (!func_name_str.getValue().empty()) {
parser.emitError(loc)
<< "expected empty string or symbol while parsing tf.func "
"attribute";
return {};
}
name = SymbolRefAttr::get(parser.getContext(), "");
}
if (!mlir::isa<SymbolRefAttr>(name)) {
parser.emitError(loc) << "expected symbol while parsing tf.func attribute";
return {};
}
if (failed(parser.parseComma())) return {};
loc = parser.getCurrentLocation();
if (failed(parser.parseAttribute(dict)) || !mlir::isa<DictionaryAttr>(dict)) {
parser.emitError(loc)
<< "expected Dictionary attribute while parsing tf.func attribute";
return {};
}
if (failed(parser.parseGreater())) return {};
return FuncAttr::get(parser.getContext(), mlir::cast<SymbolRefAttr>(name),
mlir::cast<DictionaryAttr>(dict));
}
void PlaceholderAttr::print(AsmPrinter& os) const {
os << "<" << StringAttr::get(getContext(), getValue()) << ">";
}
Attribute PlaceholderAttr::parse(AsmParser& parser, Type type) {
if (failed(parser.parseLess())) return {};
std::string content;
if (failed(parser.parseOptionalString(&content))) {
parser.emitError(parser.getCurrentLocation())
<< "expected string while parsing tf.placeholder attribute";
return {};
}
if (failed(parser.parseGreater())) return {};
return PlaceholderAttr::get(parser.getContext(), content);
}
void ShapeAttr::print(AsmPrinter& os) const {
os << "<";
if (hasRank()) {
auto print_dim = [&](int64_t dim) {
if (dim != ShapedType::kDynamic) {
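        // Zero dims are printed as "00": a bare "0" followed by 'x' would
        // otherwise read as a hex prefix (see ParsesDimensionListWithZero).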
if (dim == 0) {
os << "00";
} else {
os << dim;
}
} else {
os << "?";
}
};
llvm::interleave(getShape(), os, print_dim, "x");
} else {
os << "*";
}
os << ">";
}
Attribute ShapeAttr::parse(AsmParser& parser, Type type) {
if (failed(parser.parseLess())) return {};
if (succeeded(parser.parseOptionalStar())) {
if (failed(parser.parseGreater())) {
parser.emitError(parser.getCurrentLocation())
<< "expected `>` after `*` when parsing a tf.shape "
"attribute";
return {};
}
return ShapeAttr::get(parser.getContext(), std::nullopt);
}
SmallVector<int64_t> shape;
if (failed(parser.parseOptionalGreater())) {
auto parse_element = [&]() {
shape.emplace_back();
llvm::SMLoc loc = parser.getCurrentLocation();
if (succeeded(parser.parseOptionalQuestion())) {
shape.back() = ShapedType::kDynamic;
} else if (failed(parser.parseInteger(shape.back()))) {
parser.emitError(loc)
<< "expected an integer or `?` when parsing a tf.shape attribute";
return failure();
}
return success();
};
if (failed(parse_element())) return {};
while (failed(parser.parseOptionalGreater())) {
if (failed(parser.parseXInDimensionList()) || failed(parse_element()))
return {};
}
}
return ShapeAttr::get(parser.getContext(), llvm::ArrayRef(shape));
}
ShapeAttr ShapeAttr::get(MLIRContext* context,
std::optional<ArrayRef<int64_t>> shape) {
if (shape) return Base::get(context, *shape, false);
return Base::get(context, ArrayRef<int64_t>(), true);
}
ShapeAttr ShapeAttr::get(MLIRContext* context, ShapedType shaped_type) {
if (shaped_type.hasRank())
return Base::get(context, shaped_type.getShape(), false);
return Base::get(context, ArrayRef<int64_t>(), true);
}
std::optional<ArrayRef<int64_t>> ShapeAttr::getValue() const {
if (hasRank()) return getShape();
return std::nullopt;
}
bool ShapeAttr::hasRank() const { return !getImpl()->unranked; }
int64_t ShapeAttr::getRank() const {
assert(hasRank());
return getImpl()->shape.size();
}
bool ShapeAttr::hasStaticShape() const {
if (!hasRank()) return false;
for (auto dim : getShape()) {
if (dim < 0) return false;
}
return true;
}
namespace {
std::optional<ArrayRef<int64_t>> GetShape(Value value) {
auto shaped_type = mlir::cast<ShapedType>(value.getType());
if (shaped_type.hasRank()) return shaped_type.getShape();
return std::nullopt;
}
bool GetCastCompatibleShape(ArrayRef<int64_t> a_shape,
ArrayRef<int64_t> b_shape,
SmallVectorImpl<int64_t>* refined_shape) {
if (a_shape.size() != b_shape.size()) return false;
int64_t rank = a_shape.size();
refined_shape->reserve(rank);
for (auto dims : llvm::zip(a_shape, b_shape)) {
int64_t dim1 = std::get<0>(dims);
int64_t dim2 = std::get<1>(dims);
if (ShapedType::isDynamic(dim1)) {
refined_shape->push_back(dim2);
continue;
}
if (ShapedType::isDynamic(dim2)) {
refined_shape->push_back(dim1);
continue;
}
if (dim1 == dim2) {
refined_shape->push_back(dim1);
continue;
}
return false;
}
return true;
}
}
OperandShapeIterator::OperandShapeIterator(Operation::operand_iterator it)
: llvm::mapped_iterator<Operation::operand_iterator,
std::optional<ArrayRef<int64_t>> (*)(Value)>(
it, &GetShape) {}
ResultShapeIterator::ResultShapeIterator(Operation::result_iterator it)
: llvm::mapped_iterator<Operation::result_iterator,
std::optional<ArrayRef<int64_t>> (*)(Value)>(
it, &GetShape) {}
bool TensorFlowType::classof(Type type) {
return llvm::isa<TFTypeDialect>(type.getDialect());
}
bool TensorFlowRefType::classof(Type type) {
return mlir::isa<
#define HANDLE_TF_TYPE(tftype, enumerant, name)
#define HANDLE_TF_REF_TYPE(tftype, enumerant, name) tftype##Type,
#define HANDLE_LAST_TF_TYPE(tftype, enumerant, name) tftype##Type
#include "tensorflow/core/ir/types/types.def"
>(type);
}
TensorFlowType TensorFlowRefType::get(Type type) {
MLIRContext* ctx = type.getContext();
type = getElementTypeOrSelf(type);
if (type.isF16()) {
return HalfRefType::get(ctx);
} else if (type.isF32()) {
return FloatRefType::get(ctx);
} else i | #include "tensorflow/core/ir/types/dialect.h"
#include <cstdint>
#include <limits>
#include <gmock/gmock.h>
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
TEST(TFTypesDialect, TestFuncAttrSubElement) {
const char *const code = R"mlir(
"test.op"() {func = #tf_type.func<@foo, {bar = @foo}>} : () -> ()
)mlir";
MLIRContext context;
context.allowUnregisteredDialects();
context.getOrLoadDialect<tf_type::TFTypeDialect>();
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
Operation &test_op = module->front();
Builder b(&context);
StringAttr baz = b.getStringAttr("baz");
ASSERT_TRUE(succeeded(SymbolTable::replaceAllSymbolUses(
b.getStringAttr("foo"), baz, test_op.getParentRegion())));
auto func_attr = mlir::dyn_cast<tf_type::FuncAttr>(test_op.getAttr("func"));
ASSERT_TRUE(func_attr);
auto sym_ref = FlatSymbolRefAttr::get(baz);
EXPECT_TRUE(func_attr.getName() == sym_ref);
auto bar_ref = func_attr.getAttrs().get("bar");
EXPECT_TRUE(bar_ref == sym_ref);
}
TEST(TFTypesDialect, ParsesDimensionListWithZero) {
const char *const code = R"mlir(
"test.op"() {shape = #tf_type.shape<00x128>} : () -> ()
)mlir";
MLIRContext context;
context.allowUnregisteredDialects();
context.getOrLoadDialect<tf_type::TFTypeDialect>();
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
Operation &test_op = module->front();
auto shape_attr =
mlir::dyn_cast<tf_type::ShapeAttr>(test_op.getAttr("shape"));
ASSERT_TRUE(shape_attr);
EXPECT_THAT(shape_attr.getShape(), testing::ElementsAre(0, 128));
}
TEST(TFTypesDialect, ParsesDimensionListWithQuestionMark) {
const char *const code = R"mlir(
"test.op"() {shape = #tf_type.shape<0x?x2>} : () -> ()
)mlir";
MLIRContext context;
context.allowUnregisteredDialects();
context.getOrLoadDialect<tf_type::TFTypeDialect>();
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
Operation &test_op = module->front();
auto shape_attr =
mlir::dyn_cast<tf_type::ShapeAttr>(test_op.getAttr("shape"));
ASSERT_TRUE(shape_attr);
EXPECT_THAT(shape_attr.getShape(),
testing::ElementsAre(0, std::numeric_limits<int64_t>::min(), 2));
}
TEST(TFTypesDialect, ParsesDimensionListWithNegativeOne) {
const char *const code = R"mlir(
"test.op"() {shape = #tf_type.shape<0x-1x2>} : () -> ()
)mlir";
MLIRContext context;
context.allowUnregisteredDialects();
context.getOrLoadDialect<tf_type::TFTypeDialect>();
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
Operation &test_op = module->front();
auto shape_attr =
mlir::dyn_cast<tf_type::ShapeAttr>(test_op.getAttr("shape"));
ASSERT_TRUE(shape_attr);
EXPECT_THAT(shape_attr.getShape(), testing::ElementsAre(0, -1, 2));
}
}
}
} |
1,451 | cpp | tensorflow/tensorflow | kernel_stats_utils | tensorflow/core/profiler/utils/kernel_stats_utils.cc | tensorflow/core/profiler/utils/kernel_stats_utils_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_UTILS_KERNEL_STATS_UTILS_H_
#define TENSORFLOW_CORE_PROFILER_UTILS_KERNEL_STATS_UTILS_H_
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
namespace tensorflow {
namespace profiler {
void ParseKernelLaunchParams(absl::string_view xstat_kernel_details,
KernelReport* kernel);
bool IsKernelUsingTensorCore(absl::string_view kernel_name);
bool IsOpTensorCoreEligible(absl::string_view tf_op_name);
bool IsEinsumTensorCoreEligible(absl::string_view equation);
struct KernelReportLessThanComparator {
bool operator()(const KernelReport& lhs, const KernelReport& rhs) const;
};
struct KernelReportEqualToComparator {
bool operator()(const KernelReport& lhs, const KernelReport& rhs) const;
};
void SortAndKeepTopKDurationKernelReportsInDb(KernelStatsDb* kernel_stats_db);
struct KernelReportValue {
uint64 total_duration_ns = 0;
uint64 min_duration_ns = 0;
uint64 max_duration_ns = 0;
uint64 occurrences = 0;
};
struct KernelKeyWrap {
const KernelReport* key;
template <typename H>
friend H AbslHashValue(H h, KernelKeyWrap wrap) {
return H::combine(
std::move(h),
wrap.key->is_kernel_using_tensor_core(),
wrap.key->is_op_tensor_core_eligible(),
wrap.key->block_dim(0),
wrap.key->block_dim(1),
wrap.key->block_dim(2),
wrap.key->grid_dim(0),
wrap.key->grid_dim(1),
wrap.key->grid_dim(2),
wrap.key->registers_per_thread(),
wrap.key->static_shmem_bytes(),
wrap.key->dynamic_shmem_bytes(),
wrap.key->name(),
wrap.key->op_name());
}
};
struct KernelHash {
size_t operator()(const KernelReport& key) const {
return absl::Hash<KernelKeyWrap>()(KernelKeyWrap{&key});
}
};
using KernelReportMap =
absl::flat_hash_map<KernelReport, KernelReportValue, KernelHash,
KernelReportEqualToComparator>;
void CopyTopKDurationKernelReportsToDb(const KernelReportMap& reports,
KernelStatsDb* dst);
void InsertOrUpdateKernelReport(const KernelReport& kernel,
const KernelReportValue& value,
KernelReportMap* dst);
void MergeKernelReports(const KernelReportMap& reports, KernelReportMap* dst);
struct OpLevelKernelStats {
bool is_op_tensor_core_eligible = false;
uint64 total_duration_ns = 0;
uint64 tensor_core_duration_ns = 0;
};
using KernelStatsByOpName =
absl::flat_hash_map<absl::string_view, OpLevelKernelStats>;
KernelStatsByOpName GroupKernelReportsByOpName(
const KernelStatsDb& kernel_stats_db);
}
}
#endif
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
const int kMaxNumOfKernels = 1000;
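// Substrings of GPU kernel names (cuDNN/cuBLAS mma and xmma families) that
// indicate the kernel uses Tensor Cores.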
constexpr absl::string_view kTensorCoreKernelNamePatterns[] = {
"16816",
"c1688",
"conv1x1",
"conv2d_c1_k1",
"dgrad_1x1_stride_2x2",
"direct_group",
"first_layer_wgrad_kernel",
"h1688",
"h884",
"hmma",
"i16832",
"i8816",
"s884",
"s1688",
"xmma_gemm",
"xmma_implicit_gemm",
"xmma_sparse_conv",
"xmma_sparse_gemm",
"xmma_warp_specialized_implicit_gemm"};
}
void ParseKernelLaunchParams(absl::string_view xstat_kernel_details,
KernelReport* kernel) {
const std::vector<absl::string_view> params =
absl::StrSplit(xstat_kernel_details, absl::ByAnyChar(" \n"));
constexpr uint32 kNumDimensions = 3;
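  // Initialize all three block/grid dimensions to 1; they are overwritten
  // below if the kernel details string provides them.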
for (uint32 dim = 0; dim < kNumDimensions; ++dim) {
kernel->add_block_dim(1);
kernel->add_grid_dim(1);
}
for (const auto& param : params) {
const std::vector<absl::string_view> key_value = absl::StrSplit(param, ':');
if (key_value.size() != 2) {
continue;
}
absl::string_view key = key_value[0];
absl::string_view value_str = key_value[1];
uint32 value = 0;
double pct = 0.0;
if (key == "regs" && absl::SimpleAtoi(value_str, &value)) {
kernel->set_registers_per_thread(value);
} else if (key == "static_shared" && absl::SimpleAtoi(value_str, &value)) {
kernel->set_static_shmem_bytes(value);
} else if (key == "dynamic_shared" && absl::SimpleAtoi(value_str, &value)) {
kernel->set_dynamic_shmem_bytes(value);
} else if (key == "block") {
const std::vector<absl::string_view>& block =
absl::StrSplit(value_str, ',');
uint32 tmp[3];
if (block.size() == 3 && absl::SimpleAtoi(block[0], &tmp[0]) &&
absl::SimpleAtoi(block[1], &tmp[1]) &&
absl::SimpleAtoi(block[2], &tmp[2])) {
std::copy_n(tmp, 3, kernel->mutable_block_dim()->begin());
}
} else if (key == "grid") {
const std::vector<absl::string_view>& grid =
absl::StrSplit(value_str, ',');
uint32 tmp[3];
if (grid.size() == 3 && absl::SimpleAtoi(grid[0], &tmp[0]) &&
absl::SimpleAtoi(grid[1], &tmp[1]) &&
absl::SimpleAtoi(grid[2], &tmp[2])) {
std::copy_n(tmp, 3, kernel->mutable_grid_dim()->begin());
}
} else if (key == "occ_pct" && absl::SimpleAtod(value_str, &pct)) {
kernel->set_occupancy_pct(pct);
}
}
}
bool IsKernelUsingTensorCore(absl::string_view kernel_name) {
VLOG(1) << "kernel name: " << kernel_name;
for (absl::string_view pattern : kTensorCoreKernelNamePatterns) {
if (absl::StrContains(kernel_name, pattern)) {
return true;
}
}
return false;
}
bool IsOpTensorCoreEligible(absl::string_view tf_op_name) {
return false
|| absl::EndsWith(tf_op_name, "Conv2D")
|| absl::EndsWith(tf_op_name, "Conv2DBackpropFilter")
|| absl::EndsWith(tf_op_name, "Conv2DBackpropInput")
|| absl::EndsWith(tf_op_name, "Conv3D")
|| absl::EndsWith(tf_op_name, "DepthwiseConv2dNative")
|| absl::EndsWith(tf_op_name, "DepthwiseConv2dNativeBackpropFilter")
|| absl::EndsWith(tf_op_name, "DepthwiseConv2dNativeBackpropInput")
|| absl::StrContains(tf_op_name, "BatchMatMul")
|| absl::EndsWith(tf_op_name, "/MatMul")
|| absl::EndsWith(tf_op_name, "FusedMatMul")
|| absl::EndsWith(tf_op_name, "/CudnnRNN")
|| absl::StrContains(tf_op_name, "CudnnRNNV")
|| absl::StrContains(tf_op_name, "CudnnRNNForward")
|| absl::StrContains(tf_op_name, "CudnnRNNBackprop")
|| absl::EndsWith(tf_op_name, "XlaDot")
|| absl::EndsWith(tf_op_name, "XlaDotV2");
}
bool IsEinsumTensorCoreEligible(absl::string_view equation) {
if (equation.empty()) {
return false;
}
const std::vector<absl::string_view> input_output =
absl::StrSplit(equation, "->");
if (input_output.size() != 2) {
return false;
}
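  // Only binary einsums (exactly two comma-separated inputs) are treated as
  // Tensor Core eligible.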
const std::vector<absl::string_view> lhs_rhs =
absl::StrSplit(input_output[0], ',');
return lhs_rhs.size() == 2;
}
bool KernelReportLessThanComparator::operator()(const KernelReport& lhs,
const KernelReport& rhs) const {
auto lhs_tuple = std::make_tuple(
lhs.name(),
lhs.grid_dim(0),
lhs.grid_dim(1),
lhs.grid_dim(2),
lhs.block_dim(0),
lhs.block_dim(1),
lhs.block_dim(2),
lhs.registers_per_thread(),
lhs.static_shmem_bytes(),
lhs.dynamic_shmem_bytes(),
lhs.is_kernel_using_tensor_core(),
lhs.is_op_tensor_core_eligible(),
lhs.op_name());
auto rhs_tuple = std::make_tuple(
rhs.name(),
rhs.grid_dim(0),
rhs.grid_dim(1),
rhs.grid_dim(2),
rhs.block_dim(0),
rhs.block_dim(1),
rhs.block_dim(2),
rhs.registers_per_thread(),
rhs.static_shmem_bytes(),
rhs.dynamic_shmem_bytes(),
rhs.is_kernel_using_tensor_core(),
rhs.is_op_tensor_core_eligible(),
rhs.op_name());
return lhs_tuple < rhs_tuple;
}
bool KernelReportEqualToComparator::operator()(const KernelReport& lhs,
const KernelReport& rhs) const {
return (
lhs.is_kernel_using_tensor_core() == rhs.is_kernel_using_tensor_core() &&
lhs.is_op_tensor_core_eligible() == rhs.is_op_tensor_core_eligible() &&
lhs.block_dim(0) == rhs.block_dim(0) &&
lhs.block_dim(1) == rhs.block_dim(1) &&
lhs.block_dim(2) == rhs.block_dim(2) &&
lhs.grid_dim(0) == rhs.grid_dim(0) &&
lhs.grid_dim(1) == rhs.grid_dim(1) &&
lhs.grid_dim(2) == rhs.grid_dim(2) &&
lhs.registers_per_thread() == rhs.registers_per_thread() &&
lhs.static_shmem_bytes() == rhs.static_shmem_bytes() &&
lhs.dynamic_shmem_bytes() == rhs.dynamic_shmem_bytes() &&
lhs.name() == rhs.name() &&
lhs.op_name() == rhs.op_name());
}
void SortAndKeepTopKDurationKernelReportsInDb(KernelStatsDb* kernel_stats_db) {
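  // Sort by descending total duration; ties are broken by the deterministic
  // field-wise comparator.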
auto comp = [](const KernelReport& lhs, const KernelReport& rhs) {
return lhs.total_duration_ns() > rhs.total_duration_ns() ||
(lhs.total_duration_ns() == rhs.total_duration_ns() &&
KernelReportLessThanComparator()(lhs, rhs));
};
if (kernel_stats_db->reports_size() > kMaxNumOfKernels) {
std::partial_sort(
kernel_stats_db->mutable_reports()->begin(),
kernel_stats_db->mutable_reports()->begin() + kMaxNumOfKernels,
kernel_stats_db->mutable_reports()->end(), comp);
kernel_stats_db->mutable_reports()->erase(
kernel_stats_db->mutable_reports()->begin() + kMaxNumOfKernels,
kernel_stats_db->mutable_reports()->end());
} else {
std::sort(kernel_stats_db->mutable_reports()->begin(),
kernel_stats_db->mutable_reports()->end(), comp);
}
}
void CopyTopKDurationKernelReportsToDb(const KernelReportMap& reports,
KernelStatsDb* dst) {
std::vector<std::pair<const KernelReport*, const KernelReportValue*>>
kernels_to_sort;
kernels_to_sort.reserve(reports.size());
for (const auto& report_value : reports) {
kernels_to_sort.push_back(
std::make_pair(&report_value.first, &report_value.second));
}
auto comp =
[](const std::pair<const KernelReport*, const KernelReportValue*>& lhs,
const std::pair<const KernelReport*, const KernelReportValue*>& rhs) {
return lhs.second->total_duration_ns > rhs.second->total_duration_ns ||
(lhs.second->total_duration_ns ==
rhs.second->total_duration_ns &&
KernelReportLessThanComparator()(*lhs.first, *rhs.first));
};
if (kernels_to_sort.size() > kMaxNumOfKernels) {
absl::c_partial_sort(kernels_to_sort,
kernels_to_sort.begin() + kMaxNumOfKernels, comp);
} else {
absl::c_sort(kernels_to_sort, comp);
}
int copy_size =
std::min(kMaxNumOfKernels, static_cast<int>(kernels_to_sort.size()));
for (int i = 0; i < copy_size; i++) {
KernelReport* report = dst->add_reports();
*report = *kernels_to_sort[i].first;
const KernelReportValue& kernel_value = *kernels_to_sort[i].second;
report->set_occurrences(kernel_value.occurrences);
report->set_min_duration_ns(kernel_value.min_duration_ns);
report->set_max_duration_ns(kernel_value.max_duration_ns);
report->set_total_duration_ns(kernel_value.total_duration_ns);
}
}
void InsertOrUpdateKernelReport(const KernelReport& kernel,
const KernelReportValue& value,
KernelReportMap* dst) {
KernelReportValue& element = (*dst)[kernel];
if (element.occurrences == 0) {
element = value;
} else {
element.total_duration_ns += value.total_duration_ns;
element.min_duration_ns =
std::min(element.min_duration_ns, value.min_duration_ns);
element.max_duration_ns =
std::max(element.max_duration_ns, value.max_duration_ns);
element.occurrences += value.occurrences;
}
}
void MergeKernelReports(const KernelReportMap& reports, KernelReportMap* dst) {
for (auto& kernel_value : reports) {
InsertOrUpdateKernelReport(kernel_value.first, kernel_value.second, dst);
}
}
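// Aggregates kernel reports by op name, accumulating the total and
// tensor-core durations of each op.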
KernelStatsByOpName GroupKernelReportsByOpName(
const KernelStatsDb& kernel_stats_db) {
KernelStatsByOpName op_level_kernel_stats;
for (const KernelReport& kernel_report : kernel_stats_db.reports()) {
auto ret = op_level_kernel_stats.emplace(kernel_report.op_name(),
OpLevelKernelStats());
    OpLevelKernelStats& stats = ret.first->second;
    if (ret.second) {
      stats.is_op_tensor_core_eligible =
          kernel_report.is_op_tensor_core_eligible();
    } else {
      DCHECK_EQ(stats.is_op_tensor_core_eligible,
                kernel_report.is_op_tensor_core_eligible());
    }
    stats.total_duration_ns += kernel_report.total_duration_ns();
    if (kernel_report.is_kernel_using_tensor_core()) {
      stats.tensor_core_duration_ns += kernel_report.total_duration_ns();
    }
}
return op_level_kernel_stats;
}
}
} | #include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include <gmock/gmock.h>
#include "xla/backends/profiler/gpu/cupti_collector.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::FieldsAre;
TEST(KernelStatsUtilsTest, TestGroupKernelReportsByOpName) {
KernelStatsDb kernel_stats_db;
KernelReport* kernel_report_1 = kernel_stats_db.add_reports();
kernel_report_1->set_name("op1_kernel1");
kernel_report_1->set_op_name("op1");
kernel_report_1->set_total_duration_ns(1000);
kernel_report_1->set_is_kernel_using_tensor_core(true);
kernel_report_1->set_is_op_tensor_core_eligible(true);
KernelReport* kernel_report_2 = kernel_stats_db.add_reports();
kernel_report_2->set_name("op1_kernel2");
kernel_report_2->set_op_name("op1");
kernel_report_2->set_total_duration_ns(1000);
kernel_report_2->set_is_kernel_using_tensor_core(false);
kernel_report_2->set_is_op_tensor_core_eligible(true);
KernelReport* kernel_report_3 = kernel_stats_db.add_reports();
kernel_report_3->set_name("op2_kernel1");
kernel_report_3->set_op_name("op2");
kernel_report_3->set_total_duration_ns(100);
kernel_report_3->set_is_kernel_using_tensor_core(false);
kernel_report_3->set_is_op_tensor_core_eligible(false);
KernelStatsByOpName kernel_stats_by_op_name =
GroupKernelReportsByOpName(kernel_stats_db);
ASSERT_EQ(kernel_stats_by_op_name.size(), 2);
auto iter1 = kernel_stats_by_op_name.find("op1");
auto iter2 = kernel_stats_by_op_name.find("op2");
ASSERT_NE(iter1, kernel_stats_by_op_name.end());
ASSERT_NE(iter2, kernel_stats_by_op_name.end());
const OpLevelKernelStats& op1_stats = iter1->second;
const OpLevelKernelStats& op2_stats = iter2->second;
EXPECT_EQ(op1_stats.is_op_tensor_core_eligible, true);
EXPECT_EQ(op1_stats.total_duration_ns, 2000);
EXPECT_EQ(op1_stats.tensor_core_duration_ns, 1000);
EXPECT_EQ(op2_stats.is_op_tensor_core_eligible, false);
EXPECT_EQ(op2_stats.total_duration_ns, 100);
EXPECT_EQ(op2_stats.tensor_core_duration_ns, 0);
}
TEST(KernelStatsUtilsTest, KernelDetailsXStatParser) {
xla::profiler::KernelDetails kernel_info;
kernel_info.registers_per_thread = 10;
kernel_info.static_shared_memory_usage = 128;
kernel_info.dynamic_shared_memory_usage = 256;
kernel_info.block_x = 32;
kernel_info.block_y = 8;
kernel_info.block_z = 4;
kernel_info.grid_x = 3;
kernel_info.grid_y = 2;
kernel_info.grid_z = 1;
const double occupancy_pct = 50.0;
std::string xstat_kernel_details = ToXStat(kernel_info, occupancy_pct);
KernelReport kernel;
ParseKernelLaunchParams(xstat_kernel_details, &kernel);
EXPECT_EQ(kernel.registers_per_thread(), 10);
EXPECT_EQ(kernel.static_shmem_bytes(), 128);
EXPECT_EQ(kernel.dynamic_shmem_bytes(), 256);
EXPECT_EQ(kernel.block_dim()[0], 32);
EXPECT_EQ(kernel.block_dim()[1], 8);
EXPECT_EQ(kernel.block_dim()[2], 4);
EXPECT_EQ(kernel.grid_dim()[0], 3);
EXPECT_EQ(kernel.grid_dim()[1], 2);
EXPECT_EQ(kernel.grid_dim()[2], 1);
}
TEST(KernelStatsUtilsTest, KernelDetailsTokenizer) {
KernelReport kernel;
absl::string_view kernel_details_0 = "odd grid:3,2,1";
ParseKernelLaunchParams(kernel_details_0, &kernel);
EXPECT_EQ(kernel.grid_dim()[0], 3);
EXPECT_EQ(kernel.grid_dim()[1], 2);
EXPECT_EQ(kernel.grid_dim()[2], 1);
absl::string_view kernel_details_1 = "block:6,5,4 odd ";
ParseKernelLaunchParams(kernel_details_1, &kernel);
EXPECT_EQ(kernel.block_dim()[0], 6);
EXPECT_EQ(kernel.block_dim()[1], 5);
EXPECT_EQ(kernel.block_dim()[2], 4);
absl::string_view kernel_details_2 = "block:1,2,3 odd grid:4,5,6";
ParseKernelLaunchParams(kernel_details_2, &kernel);
EXPECT_EQ(kernel.block_dim()[0], 1);
EXPECT_EQ(kernel.block_dim()[1], 2);
EXPECT_EQ(kernel.block_dim()[2], 3);
EXPECT_EQ(kernel.grid_dim()[0], 4);
EXPECT_EQ(kernel.grid_dim()[1], 5);
EXPECT_EQ(kernel.grid_dim()[2], 6);
absl::string_view kernel_details_3 = "static_shared:7 dynamic_shared:8";
ParseKernelLaunchParams(kernel_details_3, &kernel);
EXPECT_EQ(kernel.static_shmem_bytes(), 7);
EXPECT_EQ(kernel.dynamic_shmem_bytes(), 8);
}
TEST(KernelStatsUtilsTest, TestInsertOrUpdateKernelReport) {
KernelReport kr;
kr.set_name("op1_kernel1");
kr.set_op_name("op1");
kr.add_block_dim(32);
kr.add_block_dim(8);
kr.add_block_dim(4);
kr.add_grid_dim(3);
kr.add_grid_dim(2);
kr.add_grid_dim(1);
KernelReportValue krv1;
krv1.total_duration_ns = 1700;
krv1.min_duration_ns = 500;
krv1.max_duration_ns = 1200;
krv1.occurrences = 2;
KernelReportValue krv2;
krv2.total_duration_ns = 900;
krv2.min_duration_ns = 900;
krv2.max_duration_ns = 900;
krv2.occurrences = 1;
KernelReportMap dst1;
InsertOrUpdateKernelReport(kr, krv1, &dst1);
InsertOrUpdateKernelReport(kr, krv2, &dst1);
EXPECT_THAT(dst1[kr], FieldsAre(2600, 500, 1200, 3));
KernelReportMap dst2;
InsertOrUpdateKernelReport(kr, krv2, &dst2);
InsertOrUpdateKernelReport(kr, krv1, &dst2);
EXPECT_THAT(dst2[kr], FieldsAre(2600, 500, 1200, 3));
}
}
}
} |
1,452 | cpp | tensorflow/tensorflow | step_intersection | tensorflow/core/profiler/utils/step_intersection.cc | tensorflow/core/profiler/utils/step_intersection_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_UTILS_STEP_INTERSECTION_H_
#define TENSORFLOW_CORE_PROFILER_UTILS_STEP_INTERSECTION_H_
#include <algorithm>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
namespace tensorflow {
namespace profiler {
struct StepsAlignment {
uint32 begin_subordinate_idx;
uint32 begin_chief_idx;
uint32 num_steps;
};
class StepIntersection {
public:
StepIntersection(
uint32 max_steps,
const absl::flat_hash_map<uint32, const StepDatabaseResult*>&
perhost_stepdb);
uint32 NumSteps() const { return end_chief_idx_ - begin_chief_idx_; }
bool EmptyIntersect() const { return empty_intersect_; }
std::vector<uint32> DstStepNumbers() const;
uint32 FirstStepIndex(uint32 host_id) const;
uint32 StepsDropped() const { return steps_dropped_; }
std::string DebugString() const;
private:
absl::flat_hash_map<uint32, StepsAlignment> perhost_alignment_;
  uint32 chief_host_id_;
uint32 steps_dropped_;
bool empty_intersect_;
uint32 begin_chief_idx_;
uint32 end_chief_idx_;
};
}
}
#endif
#include "tensorflow/core/profiler/utils/step_intersection.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tsl/profiler/utils/timespan.h"
namespace tensorflow {
namespace profiler {
namespace {
tsl::profiler::Timespan StepTimespan(const PerCoreStepInfo& percore_stepinfo) {
uint64 min_ps = kuint64max;
uint64 max_ps = 0;
for (const auto& core_stepinfo : percore_stepinfo.step_info_per_core()) {
const auto& stepinfo = core_stepinfo.second;
uint64 begin_ps = stepinfo.begin_ps();
uint64 end_ps = begin_ps + stepinfo.duration_ps();
min_ps = std::min(min_ps, begin_ps);
max_ps = std::max(max_ps, end_ps);
}
return (min_ps < max_ps)
? tsl::profiler::Timespan::FromEndPoints(min_ps, max_ps)
: tsl::profiler::Timespan();
}
tsl::profiler::Timespan AllStepsTimespan(const StepDatabaseResult& step_db) {
uint64 min_ps = kuint64max;
uint64 max_ps = 0;
for (const auto& step : step_db.step_sequence()) {
tsl::profiler::Timespan timespan = StepTimespan(step);
uint64 begin_ps = timespan.begin_ps();
uint64 end_ps = timespan.end_ps();
min_ps = std::min(min_ps, begin_ps);
max_ps = std::max(max_ps, end_ps);
}
return (min_ps < max_ps)
? tsl::profiler::Timespan::FromEndPoints(min_ps, max_ps)
: tsl::profiler::Timespan();
}
struct AlignmentInfo {
StepsAlignment alignment;
double similarity;
};
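// The similarity of two steps is the overlap (in picoseconds) of their
// timespans.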
double StepSimilarity(const PerCoreStepInfo& subordinate_step,
const PerCoreStepInfo& chief_step) {
tsl::profiler::Timespan subordinate_timespan = StepTimespan(subordinate_step);
tsl::profiler::Timespan chief_timespan = StepTimespan(chief_step);
return chief_timespan.OverlappedDurationPs(subordinate_timespan);
}
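// Aligns the subordinate and chief step sequences at the given anchor indices
// and returns the resulting alignment together with the summed per-step
// similarity.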
AlignmentInfo ComputeAlignmentInfo(const StepDatabaseResult& subordinate,
uint32 subordinate_anchor,
const StepDatabaseResult& chief,
uint32 chief_anchor) {
uint32 pre_anchor_steps = std::min(subordinate_anchor, chief_anchor);
uint32 post_anchor_steps =
std::min(subordinate.step_sequence_size() - subordinate_anchor,
chief.step_sequence_size() - chief_anchor);
uint32 alignment_steps = pre_anchor_steps + post_anchor_steps;
double similarity = 0;
uint32 begin_subordinate_idx = subordinate_anchor - pre_anchor_steps;
uint32 begin_chief_idx = chief_anchor - pre_anchor_steps;
for (uint32 i = 0; i < alignment_steps; i++) {
similarity +=
StepSimilarity(subordinate.step_sequence(begin_subordinate_idx + i),
chief.step_sequence(begin_chief_idx + i));
}
StepsAlignment alignment = {begin_subordinate_idx, begin_chief_idx,
alignment_steps};
return {alignment, similarity};
}
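// Searches all alignments that pin either the first subordinate step or the
// first chief step to some anchor, and returns the one with the highest total
// similarity.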
StepsAlignment FindStepsAlignment(const StepDatabaseResult& subordinate,
const StepDatabaseResult& chief) {
double max_similarity = -1;
StepsAlignment alignment = {0, 0, 0};
if (subordinate.step_sequence_size() == 0 || chief.step_sequence_size() == 0)
return alignment;
for (auto c = 0; c < chief.step_sequence_size(); c++) {
AlignmentInfo info =
ComputeAlignmentInfo(subordinate, 0, chief, c);
if (info.similarity <= max_similarity) continue;
max_similarity = info.similarity;
alignment = info.alignment;
}
for (auto s = 1; s < subordinate.step_sequence_size(); s++) {
AlignmentInfo info =
ComputeAlignmentInfo(subordinate, s, chief, 0);
if (info.similarity <= max_similarity) continue;
max_similarity = info.similarity;
alignment = info.alignment;
}
return alignment;
}
std::string StringStepsAlignment(const StepsAlignment& alignment) {
return absl::StrCat(
"[begin_subordinate_idx: ", alignment.begin_subordinate_idx,
", begin_chief_idx: ", alignment.begin_chief_idx,
", num_steps: ", alignment.num_steps, "]");
}
std::string StringDstStepNumbers(const std::vector<uint32>& step_numbers) {
std::string str;
absl::StrAppend(&str, "[");
for (auto i = 0; i < step_numbers.size(); i++) {
if (i > 0) absl::StrAppend(&str, ", ");
absl::StrAppend(&str, step_numbers[i]);
}
absl::StrAppend(&str, "]");
return str;
}
std::string StringSrcToDstIndexMap(uint32 src_first_step_idx,
uint32 num_steps) {
std::string str;
absl::StrAppend(&str, "[");
for (auto i = 0; i < num_steps; i++) {
if (i > 0) absl::StrAppend(&str, ", ");
absl::StrAppend(&str, src_first_step_idx + i, ":", i);
}
absl::StrAppend(&str, "]");
return str;
}
}
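// The host whose steps span the shortest wall-clock time is picked as the
// chief; every other host is aligned against it, and the intersection of the
// aligned ranges (capped at max_steps) is kept.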
StepIntersection::StepIntersection(
uint32 max_steps,
const absl::flat_hash_map<uint32, const StepDatabaseResult*>&
perhost_stepdb) {
empty_intersect_ = false;
chief_host_id_ = kuint32max;
uint64 min_duration_ps = kuint64max;
const StepDatabaseResult* chief_step_db = nullptr;
for (const auto& hostid_stepdb : perhost_stepdb) {
auto host_id = hostid_stepdb.first;
const auto& step_db = hostid_stepdb.second;
tsl::profiler::Timespan timespan = AllStepsTimespan(*step_db);
if (timespan.duration_ps() < min_duration_ps) {
chief_host_id_ = host_id;
chief_step_db = step_db;
min_duration_ps = timespan.duration_ps();
}
}
if (chief_host_id_ == kuint32max) {
steps_dropped_ = 0;
begin_chief_idx_ = 0;
end_chief_idx_ = 0;
return;
}
uint32 max_begin_chief_idx = 0;
uint32 min_end_chief_idx = kuint32max;
for (const auto& hostid_stepdb : perhost_stepdb) {
auto host_id = hostid_stepdb.first;
const auto& step_db = hostid_stepdb.second;
if (host_id == chief_host_id_) {
perhost_alignment_[host_id] = {
0, 0,
static_cast<uint32>(step_db->step_sequence_size())};
} else {
perhost_alignment_[host_id] =
FindStepsAlignment(*step_db, *chief_step_db);
}
uint32 host_begin_chief_idx = perhost_alignment_[host_id].begin_chief_idx;
max_begin_chief_idx = std::max(max_begin_chief_idx, host_begin_chief_idx);
uint32 host_end_chief_idx = perhost_alignment_[host_id].begin_chief_idx +
perhost_alignment_[host_id].num_steps;
min_end_chief_idx = std::min(min_end_chief_idx, host_end_chief_idx);
}
if (max_begin_chief_idx > min_end_chief_idx) {
steps_dropped_ = 0;
begin_chief_idx_ = 0;
end_chief_idx_ = 0;
empty_intersect_ = true;
return;
}
begin_chief_idx_ = max_begin_chief_idx;
uint32 num_steps = min_end_chief_idx - max_begin_chief_idx;
if (num_steps > max_steps) {
steps_dropped_ = num_steps - max_steps;
end_chief_idx_ = max_begin_chief_idx + max_steps;
} else {
steps_dropped_ = 0;
end_chief_idx_ = min_end_chief_idx;
}
}
std::vector<uint32> StepIntersection::DstStepNumbers() const {
std::vector<uint32> result;
result.reserve(NumSteps());
for (uint32 i = 0; i < NumSteps(); i++) {
result.push_back(i);
}
return result;
}
uint32 StepIntersection::FirstStepIndex(uint32 host_id) const {
const auto* alignment = gtl::FindOrNull(perhost_alignment_, host_id);
if (alignment == nullptr) return 0;
DCHECK(alignment->begin_chief_idx <= begin_chief_idx_);
uint32 shift = begin_chief_idx_ - alignment->begin_chief_idx;
uint32 begin_subordinate_idx = alignment->begin_subordinate_idx + shift;
return begin_subordinate_idx;
}
std::string StepIntersection::DebugString() const {
std::string str;
absl::StrAppend(&str, "chief host id_: ", chief_host_id_, "\n");
absl::StrAppend(&str, "begin_chief_idx_: ", begin_chief_idx_,
", num_steps: ", NumSteps(), "\n");
absl::StrAppend(
&str, "DstStepNumbers(): ", StringDstStepNumbers(DstStepNumbers()), "\n");
std::vector<uint32> host_ids;
host_ids.reserve(perhost_alignment_.size());
for (const auto& hostid_alignment : perhost_alignment_) {
auto host_id = hostid_alignment.first;
host_ids.push_back(host_id);
}
absl::c_sort(host_ids);
absl::StrAppend(&str, "perhost_alignment:\n");
for (const auto host_id : host_ids) {
const auto* ptr = gtl::FindOrNull(perhost_alignment_, host_id);
if (ptr == nullptr) continue;
absl::StrAppend(&str, "host: ", host_id,
", step-alignment: ", StringStepsAlignment(*ptr), "\n");
}
absl::StrAppend(&str, "SrcToDstIndexMap():\n");
for (const auto host_id : host_ids) {
absl::StrAppend(&str, "host: ", host_id, ", src-to-dst-index-map: ",
StringSrcToDstIndexMap(FirstStepIndex(host_id), NumSteps()),
"\n");
}
return str;
}
}
} | #include "tensorflow/core/profiler/utils/step_intersection.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace profiler {
namespace {
using PerHostStepDb =
absl::flat_hash_map<uint32 , StepDatabaseResult>;
constexpr uint64 kStepDurationPs = 2000000000;
constexpr uint32 kNumStepsPerHost = 10;
constexpr uint64 kStepGapPs = 0;
constexpr uint32 kNumCoresPerHost = 8;
PerCoreStepInfo CreateOneTestStep(uint32 host_id, uint32 num_steps,
uint32 step_idx, uint64 step_begin_ps) {
PerCoreStepInfo result;
uint32 step_num =
step_idx * host_id;
result.set_step_num(step_num);
StepInfoResult info;
info.set_step_num(step_num);
if (host_id == 0 && step_idx == (num_steps - 1)) {
info.set_duration_ps(kStepDurationPs - 1);
} else {
info.set_duration_ps(kStepDurationPs);
}
info.set_begin_ps(step_begin_ps);
for (uint32 core_id = 0; core_id < kNumCoresPerHost; core_id++) {
(*result.mutable_step_info_per_core())[core_id] = info;
}
return result;
}
PerHostStepDb CreateTestSteps(uint32 num_hosts, uint64 shift_ps) {
PerHostStepDb result;
uint64 first_step_begin_ps = 0;
for (uint32 host_id = 0; host_id < num_hosts; host_id++) {
StepDatabaseResult step_db;
uint64 step_begin_ps = first_step_begin_ps;
for (uint32 step_idx = 0; step_idx < kNumStepsPerHost; step_idx++) {
*step_db.add_step_sequence() =
CreateOneTestStep(host_id, kNumStepsPerHost, step_idx, step_begin_ps);
step_begin_ps += (kStepDurationPs + kStepGapPs);
}
result[host_id] = step_db;
first_step_begin_ps += shift_ps;
}
return result;
}
PerHostStepDb CreateEmptyIntersectTestSteps() {
PerHostStepDb result;
uint64 step_begin_ps;
uint32 host_id;
host_id = 0;
step_begin_ps = 0;
uint64 host_0_num_steps = 10;
StepDatabaseResult step_db_0;
for (uint32 step_idx = 0; step_idx < host_0_num_steps; step_idx++) {
*step_db_0.add_step_sequence() =
CreateOneTestStep(host_id, host_0_num_steps, step_idx, step_begin_ps);
step_begin_ps += (kStepDurationPs + kStepGapPs);
}
result[host_id] = step_db_0;
host_id = 1;
step_begin_ps = (host_0_num_steps - 2) * (kStepDurationPs + kStepGapPs);
uint64 host_1_num_steps = 5;
StepDatabaseResult step_db_1;
for (uint32 step_idx = 0; step_idx < host_1_num_steps; step_idx++) {
*step_db_1.add_step_sequence() =
CreateOneTestStep(host_id, host_1_num_steps, step_idx, step_begin_ps);
step_begin_ps += (kStepDurationPs + kStepGapPs);
}
result[host_id] = step_db_1;
host_id = 2;
step_begin_ps = (host_0_num_steps + host_1_num_steps - 4) *
(kStepDurationPs + kStepGapPs);
uint64 host_2_num_steps = 10;
StepDatabaseResult step_db_2;
for (uint32 step_idx = 0; step_idx < host_2_num_steps; step_idx++) {
*step_db_2.add_step_sequence() =
CreateOneTestStep(host_id, host_2_num_steps, step_idx, step_begin_ps);
step_begin_ps += (kStepDurationPs + kStepGapPs);
}
result[host_id] = step_db_2;
return result;
}
PerHostStepDb CreateNoStep(uint32 num_hosts) {
PerHostStepDb result;
for (uint32 host_id = 0; host_id < num_hosts; host_id++) {
StepDatabaseResult step_db;
result[host_id] = step_db;
}
return result;
}
absl::flat_hash_map<uint32 , const StepDatabaseResult*> Convert(
const PerHostStepDb& perhost_stepdb) {
absl::flat_hash_map<uint32 , const StepDatabaseResult*> result;
for (const auto& hostid_stepdb : perhost_stepdb) {
auto host_id = hostid_stepdb.first;
const auto& step_db = hostid_stepdb.second;
result[host_id] = &step_db;
}
return result;
}
TEST(StepIntersectionTest, EachHostShiftedBy1StepDuration) {
uint32 num_hosts = 4;
uint64 shift_ps = kStepDurationPs;
PerHostStepDb perhost_stepdb = CreateTestSteps(num_hosts, shift_ps);
StepIntersection intersection =
StepIntersection(kNumStepsPerHost, Convert(perhost_stepdb));
EXPECT_EQ(intersection.StepsDropped(), 0);
uint32 dst_num_steps = kNumStepsPerHost - num_hosts + 1;
EXPECT_EQ(intersection.NumSteps(), dst_num_steps);
uint32 src_first_step_index = intersection.FirstStepIndex(0);
EXPECT_EQ(src_first_step_index, num_hosts - 1);
std::vector<uint32> dst_step_numbers = intersection.DstStepNumbers();
for (uint32 i = 0; i < dst_num_steps; i++) {
EXPECT_EQ(dst_step_numbers[i], i);
}
}
TEST(StepIntersectionTest, ExactlyNoShift) {
uint32 num_hosts = 4;
uint64 shift_ps = 0;
PerHostStepDb perhost_stepdb = CreateTestSteps(num_hosts, shift_ps);
StepIntersection intersection =
StepIntersection(kNumStepsPerHost, Convert(perhost_stepdb));
EXPECT_EQ(intersection.StepsDropped(), 0);
uint32 dst_num_steps = kNumStepsPerHost;
EXPECT_EQ(intersection.NumSteps(), dst_num_steps);
std::vector<uint32> dst_step_numbers = intersection.DstStepNumbers();
for (uint32 i = 0; i < dst_num_steps; i++) {
EXPECT_EQ(dst_step_numbers[i], i);
}
for (uint32 host_id = 0; host_id < num_hosts; host_id++) {
uint32 src_first_step_index = intersection.FirstStepIndex(host_id);
EXPECT_EQ(src_first_step_index, 0);
}
}
TEST(StepIntersectionTest, EachHostShiftedByJustABit) {
uint32 num_hosts = 4;
uint64 shift_ps = 100;
PerHostStepDb perhost_stepdb = CreateTestSteps(num_hosts, shift_ps);
StepIntersection intersection =
StepIntersection(kNumStepsPerHost, Convert(perhost_stepdb));
EXPECT_EQ(intersection.StepsDropped(), 0);
uint32 dst_num_steps = kNumStepsPerHost;
EXPECT_EQ(intersection.NumSteps(), dst_num_steps);
std::vector<uint32> dst_step_numbers = intersection.DstStepNumbers();
for (uint32 i = 0; i < dst_num_steps; i++) {
EXPECT_EQ(dst_step_numbers[i], i);
}
for (uint32 host_id = 0; host_id < num_hosts; host_id++) {
uint32 src_first_step_index = intersection.FirstStepIndex(host_id);
EXPECT_EQ(src_first_step_index, 0);
}
}
TEST(StepIntersectionTest, SingleHost) {
uint32 num_hosts = 1;
uint64 shift_ps = 0;
PerHostStepDb perhost_stepdb = CreateTestSteps(num_hosts, shift_ps);
StepIntersection intersection =
StepIntersection(kNumStepsPerHost, Convert(perhost_stepdb));
EXPECT_EQ(intersection.StepsDropped(), 0);
uint32 dst_num_steps = kNumStepsPerHost;
EXPECT_EQ(intersection.NumSteps(), dst_num_steps);
std::vector<uint32> dst_step_numbers = intersection.DstStepNumbers();
for (uint32 i = 0; i < dst_num_steps; i++) {
EXPECT_EQ(dst_step_numbers[i], i);
}
for (uint32 host_id = 0; host_id < num_hosts; host_id++) {
uint32 src_first_step_index = intersection.FirstStepIndex(host_id);
EXPECT_EQ(src_first_step_index, 0);
}
}
TEST(StepIntersectionTest, WithMaxSteps) {
uint32 num_hosts = 4;
uint64 shift_ps = 0;
uint32 max_steps = 3;
PerHostStepDb perhost_stepdb = CreateTestSteps(num_hosts, shift_ps);
StepIntersection intersection =
StepIntersection(max_steps, Convert(perhost_stepdb));
EXPECT_EQ(intersection.StepsDropped(), kNumStepsPerHost - max_steps);
EXPECT_EQ(intersection.NumSteps(), max_steps);
}
TEST(StepIntersectionTest, NoStep) {
uint32 num_hosts = 4;
uint32 max_steps = 100;
PerHostStepDb perhost_stepdb = CreateNoStep(num_hosts);
StepIntersection intersection =
StepIntersection(max_steps, Convert(perhost_stepdb));
EXPECT_EQ(intersection.NumSteps(), 0);
EXPECT_FALSE(intersection.EmptyIntersect());
}
TEST(StepIntersectionTest, EmptyIntersection) {
uint32 max_steps = 100;
PerHostStepDb perhost_stepdb = CreateEmptyIntersectTestSteps();
StepIntersection intersection =
StepIntersection(max_steps, Convert(perhost_stepdb));
EXPECT_EQ(intersection.StepsDropped(), 0);
EXPECT_EQ(intersection.NumSteps(), 0);
EXPECT_TRUE(intersection.EmptyIntersect());
}
}
}
} |
1,453 | cpp | tensorflow/tensorflow | op_metrics_db_utils | tensorflow/core/profiler/utils/op_metrics_db_utils.cc | tensorflow/core/profiler/utils/op_metrics_db_utils_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_UTILS_OP_METRICS_DB_UTILS_H_
#define TENSORFLOW_CORE_PROFILER_UTILS_OP_METRICS_DB_UTILS_H_
#include <algorithm>
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tsl/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
TF_CONST_INIT extern const absl::string_view kIdle;
class OpMetricsDbBuilder {
public:
explicit OpMetricsDbBuilder(OpMetricsDb* db);
protected:
OpMetrics* LookupOrInsertNewOpMetrics(uint64 hlo_module_id,
absl::string_view name);
OpMetricsDb* db() { return db_; }
private:
absl::flat_hash_map<uint64 ,
absl::flat_hash_map<std::string , OpMetrics*>>
op_metrics_map_;
OpMetricsDb* db_;
};
class XEventsOpMetricsDbBuilder {
public:
void AddOpMetric(const tsl::profiler::XEventVisitor& xevent);
OpMetricsDb Finalize(uint64_t total_time);
OpMetricsDb Finalize();
private:
using OpMetricBySymbol =
absl::flat_hash_map<uint64_t, OpMetrics>;
absl::flat_hash_map<uint64_t, OpMetricBySymbol>
flat_op_metric_;
};
inline void SetTotalTimePs(OpMetricsDb& db, uint64_t total_time_ps) {
db.set_total_time_ps(std::max(db.total_op_time_ps(), total_time_ps));
}
inline uint64_t TotalTimePs(const OpMetricsDb& db, bool exclude_idle = false) {
return exclude_idle ? db.total_op_time_ps() : db.total_time_ps();
}
double IdleTimeRatio(const OpMetricsDb& db);
uint64 IdleTimePs(const OpMetricsDb& db);
void SetIdleOp(uint64_t idle_time_ps, OpMetrics& metrics);
void AddIdleOp(OpMetricsDb& db);
inline bool IsIdleOp(const OpMetrics& metrics) {
return metrics.category() == kIdle;
}
inline uint64_t ChildrenTimePs(const OpMetrics& metrics) {
return metrics.time_ps() - metrics.self_time_ps();
}
std::optional<double> HostInfeedEnqueueRatio(const OpMetricsDb& db);
OpMetricsDb CreateTfMetricsDbFromDeviceOpMetricsDb(
const OpMetricsDb& device_op_metrics_db, bool with_idle = true);
}
}
#endif
#include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include <algorithm>
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tsl/profiler/utils/tf_op_utils.h"
#include "tsl/profiler/utils/xplane_schema.h"
#include "tsl/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
const absl::string_view kIdle = "IDLE";
namespace {
constexpr uint64_t kRootSymbolId = 0;
using tsl::profiler::StatType;
using tsl::profiler::XEventMetadataVisitor;
using tsl::profiler::XStatVisitor;
class DeviceTfOpMetricsDbBuilder : public OpMetricsDbBuilder {
public:
explicit DeviceTfOpMetricsDbBuilder(OpMetricsDb* db)
: OpMetricsDbBuilder(db) {}
void UpdateTfOpMetricsWithDeviceOpMetrics(
absl::string_view tf_op_name, absl::string_view tf_op_type,
const OpMetrics& device_op_metrics) {
OpMetrics* tf_op_metrics = OpMetricsDbBuilder::LookupOrInsertNewOpMetrics(
0, tf_op_name);
if (tf_op_metrics->category().empty()) {
tf_op_metrics->set_category(tf_op_type == tsl::profiler::kUnknownOp
? "Unknown"
: std::string(tf_op_type));
}
tf_op_metrics->set_is_eager(device_op_metrics.is_eager());
tf_op_metrics->set_occurrences(std::max(tf_op_metrics->occurrences(),
device_op_metrics.occurrences()));
tf_op_metrics->set_time_ps(tf_op_metrics->time_ps() +
device_op_metrics.time_ps());
tf_op_metrics->set_self_time_ps(tf_op_metrics->self_time_ps() +
device_op_metrics.self_time_ps());
tf_op_metrics->set_flops(tf_op_metrics->flops() +
device_op_metrics.flops());
tf_op_metrics->set_bytes_accessed(tf_op_metrics->bytes_accessed() +
device_op_metrics.bytes_accessed());
}
};
struct OpKey {
std::optional<uint64_t> program_id;
std::optional<uint64_t> symbol_id;
};
OpKey GetOpKeyFromHloEventMetadata(
const XEventMetadataVisitor& hlo_event_metadata) {
OpKey op_key;
hlo_event_metadata.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Type().has_value()) {
switch (static_cast<StatType>(*stat.Type())) {
case StatType::kProgramId:
op_key.program_id = stat.IntOrUintValue();
break;
case StatType::kSymbolId:
op_key.symbol_id = stat.IntOrUintValue();
break;
default:
break;
}
}
});
return op_key;
}
void SetOpMetadataFromHloEventMetadata(
const XEventMetadataVisitor& hlo_event_metadata, OpMetrics* op_metrics) {
if (hlo_event_metadata.HasDisplayName()) {
op_metrics->set_name(std::string(hlo_event_metadata.DisplayName()));
op_metrics->set_long_name(std::string(hlo_event_metadata.Name()));
} else {
op_metrics->set_name(std::string(hlo_event_metadata.Name()));
}
hlo_event_metadata.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Type().has_value()) {
switch (static_cast<StatType>(*stat.Type())) {
case StatType::kProgramId:
op_metrics->set_hlo_module_id(stat.IntOrUintValue());
break;
case StatType::kHloCategory:
op_metrics->set_category(std::string(stat.StrOrRefValue()));
break;
case StatType::kTfOp:
op_metrics->set_provenance(std::string(stat.StrOrRefValue()));
break;
case StatType::kFlops:
op_metrics->set_flops(stat.IntOrUintValue());
break;
case StatType::kBytesAccessed:
op_metrics->set_bytes_accessed(stat.IntOrUintValue());
break;
case StatType::kMemoryAccessBreakdown: {
tensorflow::profiler::MemoryAccessBreakdown breakdown;
const auto& value = stat.BytesValue();
if (breakdown.ParseFromArray(value.data(), value.size())) {
*op_metrics->mutable_memory_accessed_breakdown() =
breakdown.memory_accessed();
}
break;
}
case StatType::kDeduplicatedName:
op_metrics->set_deduplicated_name(std::string(stat.StrOrRefValue()));
break;
default:
break;
}
}
});
hlo_event_metadata.ForEachChild(
[&](const XEventMetadataVisitor& child_hlo_event_metadata) {
OpMetrics* child = op_metrics->mutable_children()->add_metrics_db();
child->set_occurrences(1);
SetOpMetadataFromHloEventMetadata(child_hlo_event_metadata, child);
});
}
void SetOpMetricsFromHloEvent(const tsl::profiler::XEventVisitor& hlo_event,
OpMetrics* op_metrics) {
uint64_t duration_ps = hlo_event.DurationPs();
uint64_t min_duration_ps = duration_ps;
uint64_t self_duration_ps = duration_ps;
uint64_t dma_stall_ps = 0;
hlo_event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type()) return;
switch (static_cast<StatType>(*stat.Type())) {
case StatType::kMinDurationPs:
min_duration_ps = stat.IntValue();
break;
case StatType::kSelfDurationPs:
self_duration_ps = stat.IntValue();
break;
case StatType::kDmaStallDurationPs:
dma_stall_ps = stat.IntValue();
break;
default:
break;
}
});
if (op_metrics->occurrences() == 0) {
SetOpMetadataFromHloEventMetadata(hlo_event.Metadata(), op_metrics);
op_metrics->set_occurrences(hlo_event.NumOccurrences());
op_metrics->set_time_ps(duration_ps);
op_metrics->set_min_time_ps(min_duration_ps);
op_metrics->set_self_time_ps(self_duration_ps);
op_metrics->set_dma_stall_ps(dma_stall_ps);
} else {
op_metrics->set_occurrences(op_metrics->occurrences() +
hlo_event.NumOccurrences());
op_metrics->set_time_ps(op_metrics->time_ps() + duration_ps);
op_metrics->set_min_time_ps(
std::min<uint64_t>(op_metrics->min_time_ps(), min_duration_ps));
op_metrics->set_self_time_ps(op_metrics->self_time_ps() + self_duration_ps);
op_metrics->set_dma_stall_ps(op_metrics->dma_stall_ps() + dma_stall_ps);
}
}
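// Flops and bytes accessed are recorded per occurrence in the event metadata;
// scale them by the number of occurrences to obtain totals.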
void AdjustFlopsAndBytesAccessed(OpMetrics& op_metrics) {
op_metrics.set_flops(op_metrics.flops() * op_metrics.occurrences());
op_metrics.set_bytes_accessed(op_metrics.bytes_accessed() *
op_metrics.occurrences());
for (auto& memory_access : *op_metrics.mutable_memory_accessed_breakdown()) {
memory_access.set_bytes_accessed(memory_access.bytes_accessed() *
op_metrics.occurrences());
}
}
}
OpMetricsDbBuilder::OpMetricsDbBuilder(OpMetricsDb* db) : db_(db) {
DCHECK_NE(db_, nullptr);
DCHECK_EQ(db_->metrics_db_size(), 0);
}
OpMetrics* OpMetricsDbBuilder::LookupOrInsertNewOpMetrics(
uint64 hlo_module_id, absl::string_view name) {
OpMetrics*& op_metrics = op_metrics_map_[hlo_module_id][name];
if (op_metrics == nullptr) {
op_metrics = db_->add_metrics_db();
op_metrics->set_hlo_module_id(hlo_module_id);
op_metrics->set_name(name.data(), name.size());
}
return op_metrics;
}
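// Accumulates the event into the per-(program_id, symbol_id) OpMetrics map.
// Events missing either id are ignored, and module root events
// (symbol_id == 0) are not accumulated.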
void XEventsOpMetricsDbBuilder::AddOpMetric(
const tsl::profiler::XEventVisitor& event) {
OpKey key = GetOpKeyFromHloEventMetadata(event.Metadata());
if (!key.program_id.has_value() || !key.symbol_id.has_value()) return;
OpMetricBySymbol& op_metric_by_symbol =
flat_op_metric_[key.program_id.value()];
if (key.symbol_id != kRootSymbolId) {
OpMetrics& op_metrics = op_metric_by_symbol[key.symbol_id.value()];
SetOpMetricsFromHloEvent(event, &op_metrics);
}
}
OpMetricsDb XEventsOpMetricsDbBuilder::Finalize(uint64_t total_time_ps) {
OpMetricsDb db = Finalize();
SetTotalTimePs(db, total_time_ps);
AddIdleOp(db);
return db;
}
OpMetricsDb XEventsOpMetricsDbBuilder::Finalize() {
OpMetricsDb db;
uint64_t total_op_time_ps = 0;
for (auto& [program_id, op_metric_by_symbol] : flat_op_metric_) {
for (auto& [symbol_id, op_metrics] : op_metric_by_symbol) {
AdjustFlopsAndBytesAccessed(op_metrics);
total_op_time_ps += op_metrics.self_time_ps();
db.add_metrics_db()->Swap(&op_metrics);
}
}
db.set_total_op_time_ps(total_op_time_ps);
return db;
}
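// Fraction of the profiled time that is not covered by op execution.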
double IdleTimeRatio(const OpMetricsDb& db) {
return 1.0 -
tsl::profiler::SafeDivide(db.total_op_time_ps(), db.total_time_ps());
}
uint64 IdleTimePs(const OpMetricsDb& db) {
DCHECK_GE(db.total_time_ps(), db.total_op_time_ps());
return db.total_time_ps() - db.total_op_time_ps();
}
void SetIdleOp(uint64_t idle_time_ps, OpMetrics& metrics) {
metrics.set_name(std::string(kIdle));
metrics.set_category(std::string(kIdle));
metrics.set_occurrences(0);
metrics.set_time_ps(idle_time_ps);
metrics.set_self_time_ps(idle_time_ps);
}
void AddIdleOp(OpMetricsDb& db) {
uint64 idle_time_ps = IdleTimePs(db);
SetIdleOp(idle_time_ps, *db.add_metrics_db());
}
std::optional<double> HostInfeedEnqueueRatio(const OpMetricsDb& db) {
if (db.total_host_infeed_enq_start_timestamp_ps_diff() > 0) {
return tsl::profiler::SafeDivide(
db.total_host_infeed_enq_duration_ps(),
db.total_host_infeed_enq_start_timestamp_ps_diff());
}
return std::nullopt;
}
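// Converts a device-op metrics db into a TF-op metrics db by grouping device
// ops by their TF-op provenance; ops without provenance keep their own name
// and get an "Unknown" category.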
OpMetricsDb CreateTfMetricsDbFromDeviceOpMetricsDb(
const OpMetricsDb& device_op_metrics_db, bool with_idle) {
OpMetricsDb tf_op_metrics_db;
DeviceTfOpMetricsDbBuilder builder(&tf_op_metrics_db);
for (const auto& device_op_metrics : device_op_metrics_db.metrics_db()) {
if (IsIdleOp(device_op_metrics)) {
if (with_idle) {
builder.UpdateTfOpMetricsWithDeviceOpMetrics(kIdle, kIdle,
device_op_metrics);
}
} else if (device_op_metrics.provenance().empty()) {
builder.UpdateTfOpMetricsWithDeviceOpMetrics(device_op_metrics.name(),
tsl::profiler::kUnknownOp,
device_op_metrics);
} else {
tsl::profiler::TfOp tf_op =
tsl::profiler::ParseTfOpFullname(device_op_metrics.provenance());
builder.UpdateTfOpMetricsWithDeviceOpMetrics(tf_op.name, tf_op.type,
device_op_metrics);
}
}
tf_op_metrics_db.set_total_op_time_ps(
device_op_metrics_db.total_op_time_ps());
tf_op_metrics_db.set_total_time_ps(
with_idle ? device_op_metrics_db.total_time_ps()
: device_op_metrics_db.total_op_time_ps());
return tf_op_metrics_db;
}
}
} | #include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
constexpr double kMaxError = 1E-10;
TEST(OpMetricsDbTest, IdleTimeRatio) {
OpMetricsDb metrics_db_0;
metrics_db_0.set_total_time_ps(100000000);
metrics_db_0.set_total_op_time_ps(60000000);
EXPECT_NEAR(0.4, IdleTimeRatio(metrics_db_0), kMaxError);
OpMetricsDb metrics_db_1;
metrics_db_1.set_total_time_ps(200000000);
metrics_db_1.set_total_op_time_ps(150000000);
EXPECT_NEAR(0.25, IdleTimeRatio(metrics_db_1), kMaxError);
OpMetricsDb metrics_db_2;
  metrics_db_2.set_total_time_ps(0);
  metrics_db_2.set_total_op_time_ps(0);
EXPECT_NEAR(1.0, IdleTimeRatio(metrics_db_2), kMaxError);
}
}
}
} |
1,454 | cpp | tensorflow/tensorflow | derived_timeline | tensorflow/core/profiler/utils/derived_timeline.cc | tensorflow/core/profiler/utils/derived_timeline_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_UTILS_DERIVED_TIMELINE_H_
#define TENSORFLOW_CORE_PROFILER_UTILS_DERIVED_TIMELINE_H_
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tsl/profiler/utils/group_events.h"
#include "tsl/profiler/utils/timespan.h"
namespace tensorflow {
namespace profiler {
class DerivedXEventBuilder {
public:
DerivedXEventBuilder(XEventBuilder event, std::optional<int64_t> group_id);
bool ShouldExpand(const XEventMetadata& event_metadata,
std::optional<int64_t> group_id) const;
void Expand(tsl::profiler::Timespan event_span);
tsl::profiler::Timespan GetTimespan() const { return event_.GetTimespan(); }
void SetTimespan(tsl::profiler::Timespan event_span) {
event_.SetTimespan(event_span);
}
private:
XEventBuilder event_;
std::optional<int64_t> group_id_;
};
class DerivedXLineBuilder {
public:
DerivedXLineBuilder(XPlaneBuilder* plane, int64_t line_id,
absl::string_view name, int64_t timestamp_ns,
std::vector<DerivedXLineBuilder*> dependent_lines);
XLineBuilder& Line() { return line_; }
void ExpandOrAddEvent(const XEventMetadata& event_metadata,
tsl::profiler::Timespan event_span,
std::optional<int64_t> group_id);
void ExpandOrAddEvents(
const std::vector<XEventMetadata*>& events_metadata_per_level,
tsl::profiler::Timespan event_span, std::optional<int64_t> group_id);
void ResetLastEvents(int level = 0);
private:
void ExpandOrAddLevelEvent(const XEventMetadata& event_metadata,
tsl::profiler::Timespan event_span,
std::optional<int64_t> group_id, int level);
void AdjustDurationForTraceViewer(int level);
const XStatMetadata* group_id_stat_metadata_ = nullptr;
XLineBuilder line_;
absl::flat_hash_map<int, std::optional<DerivedXEventBuilder>>
last_event_by_level_;
std::vector<DerivedXLineBuilder*> dependent_lines_;
};
struct Symbol {
absl::string_view tf_op_name;
std::string source_info;
std::string hlo_text;
};
using SymbolResolver = std::function<Symbol(std::optional<uint64_t> program_id,
absl::string_view hlo_module_name,
absl::string_view hlo_op)>;
void ProcessTfOpEvent(absl::string_view tf_op_full_name,
tsl::profiler::Timespan event_span,
std::optional<int64_t> group_id,
XPlaneBuilder& plane_builder,
DerivedXLineBuilder& tf_name_scope_line_builder,
DerivedXLineBuilder& tf_op_line_builder);
void DeriveStepEventsFromGroups(
const tsl::profiler::GroupMetadataMap& group_metadata_map,
XPlane* device_trace);
void DeriveEventsFromAnnotations(const SymbolResolver& symbol_resolver,
XPlane* device_trace);
void DeriveEventsFromHostTrace(
const XPlane* host_trace,
const tsl::profiler::GroupMetadataMap& group_metadata_map,
std::vector<XPlane*> device_traces);
void GenerateDerivedTimeLines(
const tsl::profiler::GroupMetadataMap& group_metadata_map, XSpace* space);
void DeriveLinesFromStats(tensorflow::profiler::XPlane* device_trace);
void DeriveLinesForXlaCpuOps(tensorflow::profiler::XPlane* host_trace);
}
}
#endif
#include "tensorflow/core/profiler/utils/derived_timeline.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/util/stats_calculator.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/gpu_event_stats.h"
#include "tensorflow/core/profiler/utils/hlo_module_map.h"
#include "tensorflow/core/profiler/utils/hlo_proto_map.h"
#include "tensorflow/core/profiler/utils/host_offload_utils.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/convert/xla_op_utils.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
#include "tsl/profiler/utils/group_events.h"
#include "tsl/profiler/utils/tf_op_utils.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
#include "tsl/profiler/utils/timespan.h"
#include "tsl/profiler/utils/tpu_xplane_utils.h"
#include "tsl/profiler/utils/trace_utils.h"
#include "tsl/profiler/utils/xplane_schema.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::FindMutableTensorCorePlanes;
inline std::string HloModuleEventName(const GpuEventStats& stats) {
return stats.program_id ? tsl::profiler::HloModuleNameWithProgramId(
stats.hlo_module_name, *stats.program_id)
: std::string(stats.hlo_module_name);
}
inline std::string HloOpEventPrefix(const GpuEventStats& stats) {
return stats.program_id ? absl::StrCat(*stats.program_id, "/")
: absl::StrCat(stats.hlo_module_name, "/");
}
std::vector<XEventMetadata*> GetOrCreateHloOpEventsMetadata(
XPlaneBuilder& xplane, const GpuEventStats& stats, const Symbol symbol) {
DCHECK(stats.IsXlaOp());
DCHECK(!stats.hlo_module_name.empty());
std::vector<XEventMetadata*> hlo_op_events_metadata;
hlo_op_events_metadata.reserve(stats.hlo_op_names.size());
std::string hlo_op_event_prefix = HloOpEventPrefix(stats);
for (absl::string_view hlo_op_name : stats.hlo_op_names) {
XEventMetadata* hlo_op_event_metadata = xplane.GetOrCreateEventMetadata(
absl::StrCat(hlo_op_event_prefix, hlo_op_name));
if (hlo_op_event_metadata->display_name().empty()) {
hlo_op_event_metadata->set_display_name(std::string(hlo_op_name));
}
hlo_op_events_metadata.push_back(hlo_op_event_metadata);
if (!symbol.hlo_text.empty()) {
XStatsBuilder<XEventMetadata> event_stats(hlo_op_event_metadata, &xplane);
event_stats.SetOrAddStatValue(*xplane.GetOrCreateStatMetadata("hlo_text"),
symbol.hlo_text);
}
}
return hlo_op_events_metadata;
}
}
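// Adds a derived event for the TF op (and, for TensorFlow/JAX ops, one event
// per enclosing name scope) covering `event_span`.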
void ProcessTfOpEvent(absl::string_view tf_op_full_name,
tsl::profiler::Timespan event_span,
std::optional<int64_t> group_id,
XPlaneBuilder& plane_builder,
DerivedXLineBuilder& tf_name_scope_line_builder,
DerivedXLineBuilder& tf_op_line_builder) {
tsl::profiler::TfOp tf_op = tsl::profiler::ParseTfOpFullname(tf_op_full_name);
tsl::profiler::Category category = tf_op.category;
if (category == tsl::profiler::Category::kTensorFlow ||
category == tsl::profiler::Category::kJax) {
tf_name_scope_line_builder.ExpandOrAddEvents(
plane_builder.GetOrCreateEventsMetadata(
tsl::profiler::ParseTfNameScopes(tf_op)),
event_span, group_id);
}
XEventMetadata* tf_op_event_metadata =
plane_builder.GetOrCreateEventMetadata(tf_op_full_name);
if (tf_op_event_metadata->display_name().empty()) {
tf_op_event_metadata->set_display_name(tsl::profiler::TfOpEventName(tf_op));
}
tf_op_line_builder.ExpandOrAddEvent(*tf_op_event_metadata, event_span,
group_id);
}
DerivedXEventBuilder::DerivedXEventBuilder(XEventBuilder event,
std::optional<int64_t> group_id)
: event_(std::move(event)), group_id_(group_id) {}
bool DerivedXEventBuilder::ShouldExpand(const XEventMetadata& event_metadata,
std::optional<int64_t> group_id) const {
return event_.MetadataId() == event_metadata.id() && group_id_ == group_id;
}
void DerivedXEventBuilder::Expand(tsl::profiler::Timespan event_span) {
tsl::profiler::Timespan timespan = event_.GetTimespan();
DCHECK_LE(timespan.begin_ps(), event_span.begin_ps());
timespan.ExpandToInclude(event_span);
event_.SetTimespan(timespan);
}
DerivedXLineBuilder::DerivedXLineBuilder(
XPlaneBuilder* plane, int64_t line_id, absl::string_view name,
int64_t timestamp_ns, std::vector<DerivedXLineBuilder*> dependent_lines)
: group_id_stat_metadata_(
plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kGroupId))),
line_(plane->GetOrCreateLine(line_id)),
dependent_lines_(std::move(dependent_lines)) {
line_.SetName(name);
line_.SetTimestampNs(timestamp_ns);
}
void DerivedXLineBuilder::ExpandOrAddEvent(const XEventMetadata& event_metadata,
tsl::profiler::Timespan event_span,
std::optional<int64_t> group_id) {
ExpandOrAddLevelEvent(event_metadata, event_span, group_id,
0);
}
void DerivedXLineBuilder::ExpandOrAddEvents(
const std::vector<XEventMetadata*>& events_metadata_per_level,
tsl::profiler::Timespan event_span, std::optional<int64_t> group_id) {
if (events_metadata_per_level.empty()) return;
size_t current_nested_level = events_metadata_per_level.size();
for (size_t level = 0; level < current_nested_level; ++level) {
ExpandOrAddLevelEvent(*events_metadata_per_level[level], event_span,
group_id, level);
}
ResetLastEvents(current_nested_level);
}
void DerivedXLineBuilder::ExpandOrAddLevelEvent(
const XEventMetadata& event_metadata, tsl::profiler::Timespan event_span,
std::optional<int64_t> group_id, int level) {
auto& last_event = last_event_by_level_[level];
if (last_event && last_event->ShouldExpand(event_metadata, group_id)) {
last_event->Expand(event_span);
} else {
ResetLastEvents(level);
XEventBuilder event = line_.AddEvent(event_metadata);
event.SetTimespan(event_span);
if (group_id.has_value()) {
event.AddStatValue(*group_id_stat_metadata_, *group_id);
}
last_event.emplace(std::move(event), group_id);
}
}
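// When several nested levels end with an identical timespan, shave an extra
// 1ns off each successively deeper level so the trace viewer renders the
// nesting instead of collapsing it.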
void DerivedXLineBuilder::AdjustDurationForTraceViewer(int level) {
if (level >= last_event_by_level_.size() || !last_event_by_level_[level])
return;
int max_level = level;
for (; max_level < last_event_by_level_.size(); ++max_level) {
if (!last_event_by_level_[max_level].has_value()) {
break;
}
}
--max_level;
if (max_level <= level) return;
auto& event_on_top_stack = *last_event_by_level_[max_level];
tsl::profiler::Timespan timespan = event_on_top_stack.GetTimespan();
int64_t max_shrink_ns = timespan.duration_ps() / 1000 - 1;
int64_t shrink_ns = 0;
std::optional<tsl::profiler::Timespan> last_level_timespan;
for (int i = level; i <= max_level; ++i) {
auto& current_event = *last_event_by_level_[i];
if (shrink_ns < max_shrink_ns &&
last_level_timespan == current_event.GetTimespan()) {
shrink_ns++;
}
last_level_timespan = current_event.GetTimespan();
if (shrink_ns) {
current_event.SetTimespan(tsl::profiler::Timespan::FromEndPoints(
last_level_timespan->begin_ps(),
last_level_timespan->end_ps() - 1000 * shrink_ns));
}
}
}
void DerivedXLineBuilder::ResetLastEvents(int level) {
AdjustDurationForTraceViewer(level);
for (int i = level, end = last_event_by_level_.size(); i < end; ++i) {
last_event_by_level_[i].reset();
}
if (level == 0) {
for (DerivedXLineBuilder* line : dependent_lines_) {
line->ResetLastEvents(0);
}
}
}
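// Derives a step line on the device plane: one event per group id, expanded
// to cover every device event tagged with that group.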
void DeriveStepEventsFromGroups(
const tsl::profiler::GroupMetadataMap& group_metadata_map,
XPlane* device_trace) {
XPlaneVisitor plane_visitor =
tsl::profiler::CreateTfXPlaneVisitor(device_trace);
const XStatMetadata* group_id_stat_metadata =
plane_visitor.GetStatMetadataByType(StatType::kGroupId);
if (group_id_stat_metadata == nullptr) return;
XPlaneBuilder plane_builder(device_trace);
int64_t start_timestamp_ns = GetStartTimestampNs(*device_trace);
DerivedXLineBuilder steps(&plane_builder, kThreadIdStepInfo, kStepLineName,
start_timestamp_ns, {});
for (const XEventVisitor& event_visitor :
GetSortedEvents<XEventVisitor>(plane_visitor)) {
std::optional<XStatVisitor> group_id_stat =
event_visitor.GetStat(StatType::kGroupId, *group_id_stat_metadata);
if (group_id_stat.has_value()) {
int64_t group_id = group_id_stat->IntValue();
steps.ExpandOrAddEvent(
*plane_builder.GetOrCreateEventMetadata(absl::StrCat(group_id)),
event_visitor.GetTimespan(), group_id);
}
}
AddGroupMetadataToStepEvents(group_metadata_map, steps.Line());
}
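// Derives TF-op, TF name-scope, HLO-op, HLO-module, and source lines from the
// XLA/TF annotations attached to kernel events.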
void DeriveEventsFromAnnotations(const SymbolResolver& symbol_resolver,
XPlane* device_trace) {
XPlaneVisitor plane_visitor =
tsl::profiler::CreateTfXPlaneVisitor(device_trace);
XPlaneBuilder plane_builder(device_trace);
int64_t start_timestamp_ns = GetStartTimestampNs(*device_trace);
DerivedXLineBuilder tf_ops(&plane_builder, kThreadIdTfOp,
kTensorFlowOpLineName, start_timestamp_ns, {});
DerivedXLineBuilder tf_name_scope(&plane_builder, kThreadIdTfNameScope,
kTensorFlowNameScopeLineName,
start_timestamp_ns, {&tf_ops});
DerivedXLineBuilder hlo_ops(&plane_builder, kThreadIdHloOp, kXlaOpLineName,
start_timestamp_ns, {});
DerivedXLineBuilder hlo_modules(&plane_builder, kThreadIdHloModule,
kXlaModuleLineName, start_timestamp_ns,
{&tf_name_scope, &hlo_ops});
DerivedXLineBuilder source(&plane_builder, kThreadIdSource, kSourceLineName,
start_timestamp_ns, {});
for (const XEventVisitor& event :
GetSortedEvents<XEventVisitor>(plane_visitor)) {
GpuEventStats stats(&event);
if (!stats.IsKernel()) continue;
tsl::profiler::Timespan event_span = event.GetTimespan();
if (!stats.hlo_module_name.empty()) {
hlo_modules.ExpandOrAddEvent(
*plane_builder.GetOrCreateEventMetadata(HloModuleEventName(stats)),
event_span, stats.group_id);
}
if (stats.IsXlaOp()) {
auto symbol = symbol_resolver(stats.program_id, stats.hlo_module_name,
stats.hlo_op_names.back());
hlo_ops.ExpandOrAddEvents(
GetOrCreateHloOpEventsMetadata(plane_builder, stats, symbol),
event_span, stats.group_id);
if (!symbol.tf_op_name.empty()) {
ProcessTfOpEvent(symbol.tf_op_name,
event_span, stats.group_id, plane_builder,
tf_name_scope, tf_ops);
}
if (!symbol.source_info.empty()) {
source.ExpandOrAddEvent(
*plane_builder.GetOrCreateEventMetadata(symbol.source_info),
event_span, stats.group_id);
}
} else if (stats.IsTfOp()) {
ProcessTfOpEvent(stats.tf_op_fullname,
event_span, stats.group_id, plane_builder, tf_name_scope,
tf_ops);
}
}
RemoveEmptyLines(device_trace);
}
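// Derives a kernel-launch line per device that summarizes, for each group,
// the span and statistics of the host-side launch events targeting that
// device.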
void DeriveEventsFromHostTrace(
const XPlane* host_trace,
const tsl::profiler::GroupMetadataMap& group_metadata_map,
std::vector<XPlane*> device_traces) {
struct GroupLaunchInfo {
tsl::profiler::Timespan timespan;
tsl::Stat<uint64_t> stat;
void AddEventTimespan(tsl::profiler::Timespan event_span) {
if (stat.count() == 0) {
timespan = event_span;
} else {
timespan.ExpandToInclude(event_span);
}
stat.UpdateStat(event_span.duration_ps());
}
};
using DeviceLaunchInfo =
absl::flat_hash_map<int64_t , GroupLaunchInfo>;
const int num_devices = device_traces.size();
std::vector<DeviceLaunchInfo> per_device_launch_info(num_devices);
XPlaneVisitor host_plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
host_plane.ForEachLine([&](const XLineVisitor& line) {
if (IsDerivedThreadId(line.Id())) return;
line.ForEachEvent([&](const XEventVisitor& event) {
if (absl::StartsWith(event.Name(), "cu")) return;
LaunchEventStats stats(&event);
if (stats.group_id.has_value() && stats.IsLaunch() &&
0 <= *stats.device_id && *stats.device_id < num_devices) {
GroupLaunchInfo& group_launch_info =
per_device_launch_info[*stats.device_id][*stats.group_id];
group_launch_info.AddEventTimespan(event.GetTimespan());
}
});
});
int64_t host_plane_start = GetStartTimestampNs(*host_trace);
for (int i = 0; i < num_devices; ++i) {
if (per_device_launch_info[i].empty()) continue;
int64_t device_plane_start = GetStartTimestampNs(*device_traces[i]);
XPlaneBuilder device_plane(device_traces[i]);
const XStatMetadata& group_id_stat_metadata =
*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
const XStatMetadata& num_launches_stat_metadata =
*device_plane.GetOrCreateStatMetadata("num_launches");
const XStatMetadata& max_launch_time_us_stat_metadata =
*device_plane.GetOrCreateStatMetadata("max_launch_time_us");
const XStatMetadata& avg_launch_time_us_stat_metadata =
*device_plane.GetOrCreateStatMetadata("avg_launch_time_us");
XLineBuilder launch_line =
device_plane.GetOrCreateLine(kThreadIdKernelLaunch);
launch_line.SetName(kKernelLaunchLineName);
launch_line.SetTimestampNs(std::min(device_plane_start, host_plane_start));
for (const auto& kv : per_device_launch_info[i]) {
int64_t group_id = kv.first;
const GroupLaunchInfo& group_info = kv.second;
if (const tsl::profiler::GroupMetadata* group_metadata =
gtl::FindOrNull(group_metadata_map, group_id)) {
XEventBuilder device_event =
launch_line.AddEvent(*device_plane.GetOrCreateEventMetadata(
absl::StrCat("Launch Stats for ", group_metadata->name)));
device_event.SetTimespan(group_info.timespan);
device_event.AddStatValue(group_id_stat_metadata, group_id);
device_event.AddStatValue(num_launches_stat_metadata,
group_info.stat.count());
device_event.AddStatValue(
max_launch_time_us_stat_metadata,
tsl::profiler::PicoToMicro(group_info.stat.max()));
device_event.AddStatValue(
avg_launch_time_us_stat_metadata,
tsl::profiler::PicoToMicro(group_info.stat.avg()));
}
}
}
}
void GenerateDerivedTimeLines(
const tsl::profiler::GroupMetadataMap& group_metadata_map, XSpace* space) {
HloModuleMap hlo_module_map;
{
HloProtoMap hlo_proto_map;
hlo_proto_map.AddHloProtosFromXSpace(*space);
for (const auto& [program_id, hlo_proto] : hlo_proto_map) {
AddHloProto(hlo_module_map, program_id, *hlo_proto);
}
}
  auto symbol_resolver = [&](std::optional<uint64_t> program_id,
                             absl::string_view hlo_module,
                             absl::string_view hlo_op) -> Symbol {
Symbol output;
const auto* hlo_instruction =
GetHloInstruction(hlo_module_map, program_id, hlo_op);
if (hlo_instruction != nullptr) {
output.tf_op_name = hlo_instruction->op_full_name();
output.source_info = std::string(hlo_instruction->source_info());
}
return output;
};
std::vector<XPlane*> device_planes =
FindMutablePlanesWithPrefix(space, kGpuPlanePrefix);
for (XPlane* plane : device_planes) {
DeriveStepEventsFromGroups(group_metadata_map, plane);
DeriveEventsFromAnnotations(symbol_resolver, plane);
}
const XPlane* host_plane = FindPlaneWithName(*space, kHostThreadsPlaneName);
if (host_plane) {
DeriveEventsFromHostTrace(host_plane, group_metadata_map, device_planes);
}
for (XPlane* plane : FindMutableTensorCorePlanes(space)) {
DeriveLinesFromStats(plane);
SortXPlane(plane);
}
}
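// Derives TF-op, TF name-scope, source, and host-offload lines from per-event
// stats on the device plane.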
void DeriveLinesFromStats(XPlane* device_trace) {
XPlaneVisitor plane_visitor =
tsl::profiler::CreateTfXPlaneVisitor(device_trace);
XPlaneBuilder plane_builder(device_trace);
int64_t start_timestamp_ns = GetStartTimestampNs(*device_trace);
DerivedXLineBuilder tf_ops(
&plane_builder, tensorflow::profiler::kThreadIdTfOp,
tensorflow::profiler::kTensorFlowOpLineName, start_timestamp_ns, {});
DerivedXLineBuilder tf_name_scope(
&plane_builder, tensorflow::profiler::kThreadIdTfNameScope,
tensorflow::profiler::kTensorFlowNameScopeLineName, start_timestamp_ns,
{&tf_ops});
DerivedXLineBuilder source(
&plane_builder, tensorflow::profiler::kThreadIdSource,
tensorflow::profiler::kSourceLineName, start_timestamp_ns, {});
XLineBuilder host_offload_op_line_builder =
plane_builder.GetOrCreateLine(kThreadIdHostOffloadOp);
host_offload_op_line_builder.SetName(kHostOffloadOpLineName);
host_offload_op_line_builder.SetTimestampNs(start_timestamp_ns);
HostOffloadEventProcessor host_offload_event_processor(
&plane_builder, &host_offload_op_line_builder);
for (const XEventVisitor& event :
GetSortedEvents<XEventVisitor>(plane_visitor, true)) {
tsl::profiler::Timespan event_span = event.GetTimespan();
std::optional<absl::string_view> tf_op_name;
std::optional<absl::string_view> source_info;
std::optional<uint64_t> group_id;
std::optional<uint64_t> is_async;
auto for_each_stat = [&](const XStatVisitor& stat) {
if (stat.Type() == StatType::kTfOp) {
tf_op_name = stat.StrOrRefValue();
} else if (stat.Type() == StatType::kGroupId) {
group_id = stat.IntOrUintValue();
} else if (stat.Type() == StatType::kSourceInfo) {
source_info = stat.StrOrRefValue();
} else if (stat.Type() == StatType::kIsAsync) {
is_async = stat.IntOrUintValue();
}
};
event.Metadata().ForEachStat(for_each_stat);
event.ForEachStat(for_each_stat);
if (is_async && *is_async) continue;
if (tf_op_name && !tf_op_name->empty()) {
ProcessTfOpEvent(*tf_op_name, event_span, group_id, plane_builder,
tf_name_scope, tf_ops);
}
if (source_info && !source_info->empty()) {
source.ExpandOrAddEvent(
*plane_builder.GetOrCreateEventMetadata(*source_info), event_span,
group_id);
}
if (host_offload_event_processor.IsHostOffloadOpName(event)) {
host_offload_event_processor.ProcessHostOffloadOpEvent(event, group_id);
}
}
RemoveEmptyLines(device_trace);
}
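// For the host-threads plane, derives per-thread XLA-module, TF-op, and TF
// name-scope lines from XLA:CPU events and merges them back into the host
// plane.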
void DeriveLinesForXlaCpuOps(XPlane* host_trace) {
if (host_trace == nullptr ||
!absl::StartsWith(host_trace->name(), kHostThreadsPlaneName))
return;
XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
XPlane destination_plane;
XPlaneBuilder plane_builder(&destination_plane);
int64_t line_id = tsl::profiler::kThreadIdHostXlaRegionStart;
visitor.ForEachLine([&](const XLineVisitor& line) {
int64_t start_timestamp_ns = line.TimestampNs();
DerivedXLineBuilder tf_ops(
&plane_builder, line_id++,
absl::StrCat(line.Name(), "-",
tensorflow::profiler::kTensorFlowOpLineName),
start_timestamp_ns, {});
DerivedXLineBuilder tf_name_scope(
&plane_builder, line_id++,
absl::StrCat(line.Name(), "-",
tensorflow::profiler::kTensorFlowNameScopeLineName),
start_timestamp_ns, {&tf_ops});
DerivedXLineBuilder xla_cpu_ops(
&plane_builder, line_id++,
absl::StrCat(line.Name(), "-", tsl::profiler::kXlaModuleLineName),
start_timestamp_ns, {});
line.ForEachEvent([&](const XEventVisitor& event) {
std::optional<std::string> hlo_module_name;
std::optional<std::string> framework_op_name;
event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (stat.Type().value()) {
case StatType::kHloModule:
hlo_module_name = stat.StrOrRefValue();
break;
case StatType::kTfOp:
framework_op_name = stat.StrOrRefValue();
break;
          default:
            break;
        }
});
if (hlo_module_name.has_value()) {
xla_cpu_ops.ExpandOrAddEvent(
*plane_builder.GetOrCreateEventMetadata(*hlo_module_name),
event.GetTimespan(), std::nullopt);
}
if (framework_op_name.has_value()) {
ProcessTfOpEvent(*framework_op_name, event.GetTimespan(), std::nullopt,
plane_builder, tf_name_scope, tf_ops);
}
});
});
RemoveEmptyLines(&destination_plane);
MergePlanes(destination_plane, host_trace);
}
}
} | #include "tensorflow/core/profiler/utils/derived_timeline.h"
#include <map>
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/utils/group_events.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
#include "tsl/profiler/utils/xplane_schema.h"
namespace tensorflow {
namespace profiler {
namespace {
TEST(DerivedTimelineTest, EmptySpaceTest) {
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
GenerateDerivedTimeLines(group_metadata_map, &space);
EXPECT_EQ(space.planes_size(), 0);
}
TEST(DerivedTimelineTest, HloModuleNameTest) {
const absl::string_view kHloModuleName = "hlo_module";
const absl::string_view kKernelDetails = "kernel_details";
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
XPlane* plane = GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100,
{{StatType::kHloModule, kHloModuleName},
{StatType::kKernelDetails, kKernelDetails}});
CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300,
{{StatType::kHloModule, kHloModuleName},
{StatType::kKernelDetails, kKernelDetails}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane);
EXPECT_EQ(plane_visitor.NumLines(), 2);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
if (line_visitor.Id() == 0) return;
EXPECT_EQ(line_visitor.Id(), kThreadIdHloModule);
EXPECT_EQ(line_visitor.NumEvents(), 1);
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
EXPECT_EQ(event_visitor.Name(), kHloModuleName);
});
});
}
TEST(DerivedTimelineTest, TfOpLineTest) {
const absl::string_view kTfOpName = "mul:Mul";
const absl::string_view kKernelDetails = "kernel_details";
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
XPlane* plane = GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100,
{{StatType::kTfOp, kTfOpName},
{StatType::kKernelDetails, kKernelDetails}});
CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300,
{{StatType::kTfOp, kTfOpName},
{StatType::kKernelDetails, kKernelDetails}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane);
EXPECT_EQ(plane_visitor.NumLines(), 2);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
if (line_visitor.Id() == 0) return;
EXPECT_EQ(line_visitor.Id(), kThreadIdTfOp);
EXPECT_EQ(line_visitor.NumEvents(), 1);
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
EXPECT_EQ(event_visitor.Name(), kTfOpName);
EXPECT_EQ(event_visitor.OffsetPs(), 0);
EXPECT_EQ(event_visitor.DurationPs(), 500);
});
});
}
TEST(DerivedTimelineTest, DependencyTest) {
constexpr int64_t kFirstGroupId = 0;
constexpr int64_t kSecondGroupId = 1;
const absl::string_view kTfOpName = "mul:Mul";
const absl::string_view kKernelDetails = "kernel_details";
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map(
{{0, {"train 0"}}, {1, {"train 1"}}});
XPlane* plane = GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100,
{{StatType::kGroupId, kFirstGroupId},
{StatType::kTfOp, kTfOpName},
{StatType::kKernelDetails, kKernelDetails}});
CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300,
{{StatType::kGroupId, kSecondGroupId},
{StatType::kTfOp, kTfOpName},
{StatType::kKernelDetails, kKernelDetails}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane);
EXPECT_EQ(plane_visitor.NumLines(), 3);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
if (line_visitor.Id() == 0) return;
EXPECT_TRUE(line_visitor.Id() == kThreadIdStepInfo ||
line_visitor.Id() == kThreadIdTfOp);
EXPECT_EQ(line_visitor.NumEvents(), 2);
});
}
TEST(DerivedTimelineTest, TfOpNameScopeTest) {
const absl::string_view kTfOpName = "scope1/scope2/mul:Mul";
const absl::string_view kKernelDetails = "kernel_details";
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
XPlane* plane = GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100,
{{StatType::kTfOp, kTfOpName},
{StatType::kKernelDetails, kKernelDetails}});
CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300,
{{StatType::kTfOp, kTfOpName},
{StatType::kKernelDetails, kKernelDetails}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane);
EXPECT_EQ(plane_visitor.NumLines(), 3);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
int64_t line_id = line_visitor.Id();
if (line_id == 0) {
return;
} else if (line_id == kThreadIdTfNameScope) {
EXPECT_EQ(line_visitor.NumEvents(), 2);
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
EXPECT_EQ(event_visitor.OffsetPs(), 0);
EXPECT_EQ(event_visitor.DurationPs(), 500);
});
} else if (line_id == kThreadIdTfOp) {
EXPECT_EQ(line_visitor.NumEvents(), 1);
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
EXPECT_EQ(event_visitor.Name(), kTfOpName);
EXPECT_EQ(event_visitor.OffsetPs(), 0);
EXPECT_EQ(event_visitor.DurationPs(), 500);
});
}
});
}
TEST(DerivedTimelineTest, TfOpNameScopeShrinkTest) {
{
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
XPlane* plane = GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 10000,
{{StatType::kTfOp, "a/b/c/Add:Add"},
{StatType::kKernelDetails, "blah"}});
CreateXEvent(
&plane_builder, &line_builder, "op2", 20000, 30000,
{{StatType::kTfOp, "a/d/Mul:Mul"}, {StatType::kKernelDetails, "blah"}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane);
EXPECT_EQ(plane_visitor.NumLines(), 3);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
int64_t line_id = line_visitor.Id();
if (line_id == 0) {
return;
} else if (line_id == kThreadIdTfNameScope) {
EXPECT_EQ(line_visitor.NumEvents(), 4);
std::map<absl::string_view, uint64_t> durations;
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
durations[event_visitor.Name()] = event_visitor.DurationPs();
});
EXPECT_EQ(durations["a"], 50000);
EXPECT_EQ(durations["b"], 10000);
EXPECT_EQ(durations["c"], 9000);
EXPECT_EQ(durations["d"], 30000);
}
});
}
{
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
XPlane* plane = GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 10000,
{{StatType::kTfOp, "a/b/c/d/e/Add:Add"},
{StatType::kKernelDetails, "blah"}});
CreateXEvent(&plane_builder, &line_builder, "op2", 10000, 2000,
{{StatType::kTfOp, "a/b/c/d/f/Sub:Sub"},
{StatType::kKernelDetails, "blah"}});
CreateXEvent(
&plane_builder, &line_builder, "op3", 20000, 30000,
{{StatType::kTfOp, "a/g/Mul:Mul"}, {StatType::kKernelDetails, "blah"}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane);
EXPECT_EQ(plane_visitor.NumLines(), 3);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
int64_t line_id = line_visitor.Id();
if (line_id == 0) {
return;
} else if (line_id == kThreadIdTfNameScope) {
EXPECT_EQ(line_visitor.NumEvents(), 7);
std::map<absl::string_view, uint64_t> durations;
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
durations[event_visitor.Name()] = event_visitor.DurationPs();
});
for (const auto& [name, duration] : durations) {
LOG(ERROR) << name << ": " << duration;
}
EXPECT_EQ(durations["a"], 50000);
EXPECT_EQ(durations["b"], 12000);
EXPECT_EQ(durations["c"], 11000);
EXPECT_EQ(durations["d"], 11000);
EXPECT_EQ(durations["e"], 10000);
EXPECT_EQ(durations["f"], 1000);
EXPECT_EQ(durations["g"], 30000);
}
});
}
}
TEST(DerivedTimelineTest, DeriveLinesForXlaCpuOps) {
XPlane xplane;
XPlaneBuilder plane_builder(&xplane);
plane_builder.SetName(tsl::profiler::kHostThreadsPlaneName);
absl::string_view main_line_name = "main";
auto line_builder = plane_builder.GetOrCreateLine(0);
line_builder.SetName(main_line_name);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100,
{{StatType::kHloModule, "Module1"}});
CreateXEvent(&plane_builder, &line_builder, "op2", 200, 400,
{{StatType::kHloModule, "Module2"}});
DeriveLinesForXlaCpuOps(&xplane);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
EXPECT_EQ(plane_visitor.NumLines(), 2);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
if (line_visitor.Name() == main_line_name) return;
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
if (event_visitor.Name() == "Module1") {
EXPECT_EQ(event_visitor.DurationPs(), 100);
EXPECT_EQ(event_visitor.OffsetPs(), 0);
} else if (event_visitor.Name() == "Module2") {
EXPECT_EQ(event_visitor.DurationPs(), 400);
EXPECT_EQ(event_visitor.OffsetPs(), 200);
} else {
FAIL() << "Found Event " << event_visitor.Name();
}
});
});
}
}
}
} |
1,455 | cpp | tensorflow/tensorflow | tfprof_stats | tensorflow/core/profiler/internal/tfprof_stats.cc | tensorflow/core/profiler/internal/tfprof_stats_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_STATS_H_
#define TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_STATS_H_
#include <map>
#include <memory>
#include <set>
#include <string>
#include "tensorflow/c/checkpoint_reader.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/profiler/internal/tfprof_code.h"
#include "tensorflow/core/profiler/internal/tfprof_graph.h"
#include "tensorflow/core/profiler/internal/tfprof_node.h"
#include "tensorflow/core/profiler/internal/tfprof_op.h"
#include "tensorflow/core/profiler/internal/tfprof_scope.h"
#include "tensorflow/core/profiler/internal/tfprof_show.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_log.pb.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
namespace tfprof {
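// TFStats owns the profiled graph nodes and the scope/graph/code/op views
// used to answer profiling queries via ShowGraphNode / ShowMultiGraphNode.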
class TFStats {
public:
TFStats(std::unique_ptr<GraphDef> graph,
std::unique_ptr<RunMetadata> run_meta,
std::unique_ptr<OpLogProto> op_log,
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader);
TFStats(const string& filename,
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader);
~TFStats() = default;
const std::map<string, std::unique_ptr<TFGraphNode>>& nodes() const {
return nodes_map_;
}
const std::set<int64_t>& steps() const { return steps_; }
bool has_code_traces() const { return has_code_traces_; }
double run_coverage() const {
return covered_nodes_.size() / (nodes_map_.size() + 1e-10);
}
void BuildView(const string& cmd);
void BuildAllViews();
const GraphNodeProto& ShowGraphNode(const string& cmd,
const Options& opts) const;
const MultiGraphNodeProto& ShowMultiGraphNode(const string& cmd,
const Options& opts) const;
void AddGraph(std::unique_ptr<GraphDef> graph);
void AddRunMeta(int64_t step, std::unique_ptr<RunMetadata> run_meta);
void AddOpLogProto(std::unique_ptr<OpLogProto> op_log);
void SerializeToString(string* content);
void WriteProfile(const string& filename);
void AddNodeForTest(int64_t step, std::unique_ptr<TFGraphNode> node);
private:
bool Validate(const Options& opts) const;
string MaybeReportMissingTrace() const;
std::set<int64_t> steps_;
bool has_code_traces_;
bool miss_accelerator_stream_;
std::unique_ptr<TFScope> scope_view_;
std::unique_ptr<TFGraph> graph_view_;
std::unique_ptr<TFCode> code_view_;
std::unique_ptr<TFOp> op_view_;
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader_;
std::map<string, std::unique_ptr<TFGraphNode>> nodes_map_;
GraphNodeProto empty_graph_node_;
MultiGraphNodeProto empty_multi_graph_node_;
std::map<int64_t, string> id_to_string_;
std::set<int64_t> covered_nodes_;
};
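// Illustrative usage sketch (not part of the original source; the Options
// values are placeholders):
//   auto stats = std::make_unique<TFStats>(std::move(graph), std::move(meta),
//                                          std::move(op_log), nullptr);
//   stats->BuildAllViews();
//   const GraphNodeProto& root = stats->ShowGraphNode("scope", opts);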
}
}
#endif
#include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include <stdio.h>
#include <map>
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/profiler/internal/tfprof_timeline.h"
namespace tensorflow {
namespace tfprof {
namespace {
const char* const kProfilePrefix = "Profile:\n";
bool CreateRunMetadataNode(const string& name, NodeDef* def) {
if (name == "RecvTensor" || name == "_SOURCE" ||
name.find("MEMCPY") != name.npos) {
return false;
}
def->set_name(name);
def->set_op(name);
return true;
}
}
TFStats::TFStats(std::unique_ptr<GraphDef> graph,
std::unique_ptr<RunMetadata> run_meta,
std::unique_ptr<OpLogProto> op_log,
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader)
: has_code_traces_(false),
miss_accelerator_stream_(false),
ckpt_reader_(std::move(ckpt_reader)) {
CHECK(graph) << "Must at least have GraphDef";
AddGraph(std::move(graph));
if (run_meta && run_meta->has_step_stats()) {
AddRunMeta(0, std::move(run_meta));
}
AddOpLogProto(std::move(op_log));
if (ckpt_reader_) {
for (const auto& v : ckpt_reader_->GetVariableToShapeMap()) {
auto node = nodes_map_.find(v.first);
if (node != nodes_map_.end()) {
node->second->AddOpType("_checkpoint_variables");
}
}
}
}
TFStats::TFStats(const string& filename,
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader)
: has_code_traces_(false),
miss_accelerator_stream_(false),
ckpt_reader_(std::move(ckpt_reader)) {
string str;
Status s = ReadFileToString(Env::Default(), filename, &str);
if (!s.ok()) {
absl::FPrintF(stderr, "Failed to read profile: %s", s.ToString());
return;
}
ProfileProto profile;
if (!profile.ParseFromString(str)) {
absl::FPrintF(stderr, "Failed to parse profile\n");
return;
}
for (const auto& entry : profile.id_to_string()) {
id_to_string_[entry.first] = entry.second;
}
for (const auto& node_pb : profile.nodes()) {
std::unique_ptr<TFGraphNode> node(
new TFGraphNode(node_pb.second, profile, &id_to_string_, &nodes_map_));
nodes_map_.insert(std::pair<string, std::unique_ptr<TFGraphNode>>(
node_pb.second.name(), std::move(node)));
}
has_code_traces_ = profile.has_trace();
for (int64_t s : profile.steps()) {
steps_.insert(s);
}
}
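// Lazily builds the view selected by `cmd`: kCmds[0..3] correspond to the
// scope, graph, code and op views respectively.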
void TFStats::BuildView(const string& cmd) {
if (cmd == kCmds[0] && !scope_view_) {
scope_view_ = std::make_unique<TFScope>(ckpt_reader_.get());
for (auto it = nodes_map_.begin(); it != nodes_map_.end(); it++) {
scope_view_->AddNode(it->second.get());
}
scope_view_->Build();
}
if (cmd == kCmds[1] && !graph_view_) {
graph_view_ = std::make_unique<TFGraph>(ckpt_reader_.get());
for (auto it = nodes_map_.begin(); it != nodes_map_.end(); it++) {
graph_view_->AddNode(it->second.get());
}
graph_view_->Build();
}
if (cmd == kCmds[2] && !code_view_) {
code_view_ = std::make_unique<TFCode>();
for (auto it = nodes_map_.begin(); it != nodes_map_.end(); it++) {
code_view_->AddNode(it->second.get());
}
code_view_->Build();
}
if (cmd == kCmds[3] && !op_view_) {
op_view_ = std::make_unique<TFOp>();
for (auto it = nodes_map_.begin(); it != nodes_map_.end(); it++) {
op_view_->AddNode(it->second.get());
}
op_view_->Build();
}
}
void TFStats::BuildAllViews() {
std::vector<string> cmds_str(kCmds, kCmds + sizeof(kCmds) / sizeof(*kCmds));
for (const string& cmd : cmds_str) {
BuildView(cmd);
}
}
const GraphNodeProto& TFStats::ShowGraphNode(const string& cmd,
const Options& opts) const {
if (!Validate(opts)) {
return empty_graph_node_;
}
string prefix = MaybeReportMissingTrace();
prefix += QueryDoc(cmd, opts) + kProfilePrefix;
if (cmd == kCmds[0]) {
return scope_view_->Show(prefix, opts);
} else if (cmd == kCmds[1]) {
if (opts.step < 0 && opts.output_type == kOutput[0]) {
for (int64_t step : steps_) {
Options nopts = opts;
nopts.step = step;
graph_view_->Show(prefix, nopts);
}
return empty_graph_node_;
}
return graph_view_->Show(prefix, opts);
} else {
absl::FPrintF(stderr, "Unknown command: %s\n", cmd);
return empty_graph_node_;
}
}
const MultiGraphNodeProto& TFStats::ShowMultiGraphNode(
const string& cmd, const Options& opts) const {
if (!Validate(opts)) {
return empty_multi_graph_node_;
}
string prefix = MaybeReportMissingTrace();
prefix += QueryDoc(cmd, opts) + kProfilePrefix;
if (cmd == kCmds[2]) {
if (!has_code_traces()) {
absl::FPrintF(stderr, "No code trace information\n");
return empty_multi_graph_node_;
}
return code_view_->Show(prefix, opts);
} else if (cmd == kCmds[3]) {
return op_view_->Show(prefix, opts);
} else {
absl::FPrintF(stderr, "Unknown command: %s\n", cmd);
return empty_multi_graph_node_;
}
}
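// Adds nodes from a GraphDef and wires up their inputs. Input strings of the
// form "name:idx" are split into node name and output index, and a leading
// "^" (control dependency) is stripped before calling AddInput.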
void TFStats::AddGraph(std::unique_ptr<GraphDef> graph) {
std::map<string, const NodeDef*> node_defs;
bool node_added = false;
for (const NodeDef& node : graph->node()) {
if (nodes_map_.find(node.name()) != nodes_map_.end()) {
continue;
}
node_added = true;
size_t num_nodes = nodes_map_.size();
nodes_map_[node.name()] =
std::make_unique<TFGraphNode>(&node, num_nodes, &nodes_map_);
node_defs[node.name()] = &node;
}
for (auto it = node_defs.begin(); it != node_defs.end(); it++) {
TFGraphNode* node = nodes_map_.at(it->first).get();
for (int i = 0; i < it->second->input_size(); ++i) {
string node_input = it->second->input(i);
int output_idx = 0;
auto prefix_pos = node_input.find(':');
if (prefix_pos != node_input.npos) {
std::vector<string> input_parts = absl::StrSplit(node_input, ':');
DCHECK(input_parts.size() == 2)
<< "Unknown NodeDef.input format: " << node_input;
node_input = input_parts[0];
DCHECK(absl::SimpleAtoi(input_parts[1], &output_idx))
<< "Failed to parse integer: " << output_idx;
}
if (node_input.substr(0, 1) == "^") {
node_input = node_input.substr(1);
}
node->AddInput(node_input, output_idx, i);
}
}
if (node_added) {
graph_view_.reset(nullptr);
scope_view_.reset(nullptr);
op_view_.reset(nullptr);
code_view_.reset(nullptr);
}
}
void TFStats::AddOpLogProto(std::unique_ptr<OpLogProto> op_log) {
if (!op_log) {
return;
}
for (const auto& entry : op_log->id_to_string()) {
if (id_to_string_.find(entry.first) == id_to_string_.end()) {
id_to_string_[entry.first] = entry.second;
}
}
for (const OpLogEntry& entry : op_log->log_entries()) {
auto node = nodes_map_.find(entry.name());
if (node == nodes_map_.end()) continue;
for (const string& type : entry.types()) {
node->second->AddOpType(type);
}
if (entry.float_ops()) {
node->second->AddFloatOps(entry.float_ops());
}
if (entry.has_code_def()) {
has_code_traces_ = true;
node->second->AddCode(entry.code_def(), &id_to_string_);
}
}
}
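// Merges the step stats of one RunMetadata into the node map. Nodes seen only
// at runtime are added via CreateRunMetadataNode (RecvTensor, _SOURCE and
// MEMCPY entries are skipped), and missing accelerator stream stats are noted.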
void TFStats::AddRunMeta(int64_t step, std::unique_ptr<RunMetadata> run_meta) {
if (!run_meta || !run_meta->has_step_stats()) {
absl::FPrintF(stderr, "Invalid RunMetadata for step %d\n", step);
return;
}
  steps_.insert(step);
bool has_gpu_scheduling = false;
bool has_gpu_stream = false;
for (const auto& dev_stat : run_meta->step_stats().dev_stats()) {
string dev = absl::AsciiStrToLower(dev_stat.device());
if (IsPlacedOnAccelerator(dev)) {
has_gpu_scheduling = true;
if (CountAsAcceleratorTime(dev)) {
has_gpu_stream = true;
}
}
for (const NodeExecStats& node_stat : dev_stat.node_stats()) {
string name = node_stat.node_name();
auto split_pos = node_stat.node_name().find(':');
if (split_pos != node_stat.node_name().npos) {
name = node_stat.node_name().substr(0, split_pos);
}
auto node = nodes_map_.find(name);
if (node == nodes_map_.end()) {
NodeDef def;
if (CreateRunMetadataNode(name, &def)) {
size_t num_nodes = nodes_map_.size();
nodes_map_[name] =
std::make_unique<TFGraphNode>(&def, num_nodes, &nodes_map_);
nodes_map_.at(name)->AddStepStat(step, dev_stat.device(), node_stat);
}
} else {
covered_nodes_.insert(node->second->id());
node->second->AddStepStat(step, dev_stat.device(), node_stat);
}
}
}
if (has_gpu_scheduling && !has_gpu_stream) {
miss_accelerator_stream_ = true;
}
}
string TFStats::MaybeReportMissingTrace() const {
string report = "";
if (miss_accelerator_stream_) {
report +=
"\n\nFound accelerator operation but misses accelerator "
"stream stats!\n\n"
"It's likely a gpu tracing issue rather than tf-profiler issue.\n"
"If you found your operation missing accelerator time, "
"consider to post to [email protected]!\n\n";
}
return report;
}
void TFStats::SerializeToString(string* content) {
ProfileProto profile;
for (const auto& entry : id_to_string_) {
(*profile.mutable_id_to_string())[entry.first] = entry.second;
}
for (auto it = nodes_map_.begin(); it != nodes_map_.end(); it++) {
if (it->second->id() < 0) {
continue;
}
(*profile.mutable_nodes())[it->second->id()].MergeFrom(
it->second->ToProto(nodes_map_));
}
profile.set_has_trace(has_code_traces_);
profile.set_miss_accelerator_stream(miss_accelerator_stream_);
for (int64_t s : steps_) {
profile.add_steps(s);
}
*content = profile.SerializeAsString();
}
void TFStats::WriteProfile(const string& filename) {
string content;
SerializeToString(&content);
Status s = WriteStringToFile(Env::Default(), filename, content);
if (!s.ok()) {
absl::FPrintF(stderr, "%s\n", s.ToString());
}
}
bool TFStats::Validate(const Options& opts) const {
if (opts.step >= 0 && steps_.find(opts.step) == steps_.end()) {
absl::FPrintF(stderr,
"Options -step=%d not found.\nAvailable steps: ", opts.step);
for (int64_t s : steps_) {
absl::FPrintF(stderr, "%d ", s);
}
absl::FPrintF(stderr, "\n");
return false;
}
return true;
}
void TFStats::AddNodeForTest(int64_t step, std::unique_ptr<TFGraphNode> node) {
steps_.insert(step);
nodes_map_[node->name()] = std::move(node);
}
}
} | #include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include <memory>
#include <utility>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/internal/tfprof_constants.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_log.pb.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
class TFProfStatsTest : public ::testing::Test {
protected:
TFProfStatsTest() {
string graph_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/graph.pbtxt");
std::unique_ptr<tensorflow::GraphDef> graph_pb(new tensorflow::GraphDef());
TF_CHECK_OK(
ReadProtoFile(Env::Default(), graph_path, graph_pb.get(), false));
std::unique_ptr<tensorflow::RunMetadata> run_meta_pb(
new tensorflow::RunMetadata());
string run_meta_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/run_meta");
TF_CHECK_OK(
ReadProtoFile(Env::Default(), run_meta_path, run_meta_pb.get(), true));
std::unique_ptr<OpLogProto> op_log_pb(new OpLogProto());
string op_log_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/tfprof_log");
TF_CHECK_OK(ReadBinaryProto(Env::Default(), op_log_path, op_log_pb.get()));
string ckpt_path = io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/ckpt");
TF_Status* status = TF_NewStatus();
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader(
new checkpoint::CheckpointReader(ckpt_path, status));
CHECK(TF_GetCode(status) == TF_OK);
TF_DeleteStatus(status);
tf_stats_ =
std::make_unique<TFStats>(std::move(graph_pb), std::move(run_meta_pb),
std::move(op_log_pb), std::move(ckpt_reader));
tf_stats_->BuildAllViews();
}
string TestToFromProto(const string& cmd, const Options& opts) {
string profile_file = io::JoinPath(testing::TmpDir(), "profile");
tf_stats_->WriteProfile(profile_file);
TFStats new_stats(profile_file, nullptr);
new_stats.BuildAllViews();
return new_stats.ShowGraphNode(cmd, opts).DebugString();
}
std::unique_ptr<TFStats> tf_stats_;
};
TEST_F(TFProfStatsTest, CustomOpType) {
Options opts(3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name",
{kTrainableVarType},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("scope", opts);
GraphNodeProto expected;
CHECK(protobuf::TextFormat::ParseFromString(
"name: \"_TFProfRoot\"\ntotal_exec_micros: 13\ntotal_requested_bytes: "
"2560\ntotal_parameters: 451\nchildren {\n name: \"DW\"\n exec_micros: "
"2\n requested_bytes: 1280\n parameters: 162\n total_exec_micros: 2\n "
" total_requested_bytes: 1280\n total_parameters: 162\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n cpu_exec_micros: 2\n "
"total_cpu_exec_micros: 2\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 1280\n residual_bytes: 1280\n "
" output_bytes: 1280\n total_peak_bytes: 1280\n total_residual_bytes: "
"1280\n total_output_bytes: 1280\n}\nchildren {\n name: \"DW2\"\n "
"exec_micros: 11\n requested_bytes: 1280\n parameters: 288\n "
"total_exec_micros: 11\n total_requested_bytes: 1280\n "
"total_parameters: 288\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n cpu_exec_micros: 11\n "
"total_cpu_exec_micros: 11\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 1280\n residual_bytes: 1280\n "
" output_bytes: 1280\n total_peak_bytes: 1280\n total_residual_bytes: "
"1280\n total_output_bytes: 1280\n}\nchildren {\n name: \"ScalarW\"\n "
"parameters: 1\n total_parameters: 1\n total_definition_count: "
"1\n}\ntotal_cpu_exec_micros: 13\ntotal_run_count: "
"2\ntotal_definition_count: 3\ntotal_peak_bytes: "
"2560\ntotal_residual_bytes: 2560\ntotal_output_bytes: 2560\n",
&expected));
EXPECT_EQ(expected.DebugString(), root.DebugString());
EXPECT_EQ(root.DebugString(), TestToFromProto("scope", opts));
}
TEST_F(TFProfStatsTest, CheckPointOpType) {
Options opts(3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name",
{kCkptVarType},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("scope", opts);
GraphNodeProto expected;
CHECK(protobuf::TextFormat::ParseFromString(
"name: \"_TFProfRoot\"\ntotal_exec_micros: 13\ntotal_requested_bytes: "
"2560\ntotal_parameters: 451\nchildren {\n name: \"DW\"\n exec_micros: "
"2\n requested_bytes: 1280\n parameters: 162\n total_exec_micros: 2\n "
" total_requested_bytes: 1280\n total_parameters: 162\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n cpu_exec_micros: 2\n "
"total_cpu_exec_micros: 2\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 1280\n residual_bytes: 1280\n "
" output_bytes: 1280\n total_peak_bytes: 1280\n total_residual_bytes: "
"1280\n total_output_bytes: 1280\n}\nchildren {\n name: \"DW2\"\n "
"exec_micros: 11\n requested_bytes: 1280\n parameters: 288\n "
"total_exec_micros: 11\n total_requested_bytes: 1280\n "
"total_parameters: 288\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n cpu_exec_micros: 11\n "
"total_cpu_exec_micros: 11\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 1280\n residual_bytes: 1280\n "
" output_bytes: 1280\n total_peak_bytes: 1280\n total_residual_bytes: "
"1280\n total_output_bytes: 1280\n}\nchildren {\n name: \"ScalarW\"\n "
"parameters: 1\n total_parameters: 1\n total_definition_count: "
"1\n}\ntotal_cpu_exec_micros: 13\ntotal_run_count: "
"2\ntotal_definition_count: 3\ntotal_peak_bytes: "
"2560\ntotal_residual_bytes: 2560\ntotal_output_bytes: 2560\n",
&expected));
EXPECT_EQ(expected.DebugString(), root.DebugString());
EXPECT_EQ(root.DebugString(), TestToFromProto("scope", opts));
}
TEST_F(TFProfStatsTest, TestGraph) {
Options opts(100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name", {".*"},
{"DW/Initializer/random_normal/mul"},
{""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("graph", opts);
GraphNodeProto expected;
CHECK(protobuf::TextFormat::ParseFromString(
"name: \"_TFProfRoot\"\ntotal_exec_micros: 4945\ntotal_requested_bytes: "
"30464\ntotal_parameters: 451\nchildren {\n name: "
"\"DW/Initializer/random_normal/mul\"\n children {\n name: "
"\"DW/Initializer/random_normal/RandomStandardNormal\"\n children {\n "
" name: \"DW/Initializer/random_normal/shape\"\n "
"total_definition_count: 1\n }\n input_shapes {\n key: 0\n "
" value {\n dim {\n size: 4\n }\n }\n "
"}\n total_definition_count: 2\n }\n children {\n name: "
"\"DW/Initializer/random_normal/stddev\"\n total_definition_count: "
"1\n }\n input_shapes {\n key: 0\n value {\n dim {\n "
"size: 3\n }\n dim {\n size: 3\n }\n dim {\n "
" size: 3\n }\n dim {\n size: 6\n }\n }\n "
"}\n input_shapes {\n key: 1\n value {\n dim {\n "
"size: 1\n }\n }\n }\n total_definition_count: "
"4\n}\ntotal_float_ops: 10440\ntotal_accelerator_exec_micros: "
"404\ntotal_cpu_exec_micros: 4541\ntotal_run_count: "
"6\ntotal_definition_count: 32\ntotal_peak_bytes: "
"25856\ntotal_residual_bytes: 3840\ntotal_output_bytes: 4864\n",
&expected));
EXPECT_EQ(expected.DebugString(), root.DebugString());
EXPECT_EQ(root.DebugString(), TestToFromProto("graph", opts));
}
TEST_F(TFProfStatsTest, TestFloatOps) {
Options opts(10, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, "name", {".*"}, {".*"},
{""}, {".*"}, {""}, false, {"float_ops"}, "", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("scope", opts);
GraphNodeProto expected;
CHECK(protobuf::TextFormat::ParseFromString(
"name: \"_TFProfRoot\"\ntotal_exec_micros: 4945\ntotal_requested_bytes: "
"30464\ntotal_parameters: 451\nchildren {\n name: \"Conv2D\"\n "
"exec_micros: 4292\n requested_bytes: 18176\n total_exec_micros: "
"4292\n total_requested_bytes: 18176\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n float_ops: 5832\n "
"total_float_ops: 5832\n input_shapes {\n key: 0\n value {\n "
"dim {\n size: 2\n }\n dim {\n size: 6\n "
"}\n dim {\n size: 6\n }\n dim {\n size: "
"3\n }\n }\n }\n input_shapes {\n key: 1\n value {\n "
" dim {\n size: 3\n }\n dim {\n size: 3\n "
"}\n dim {\n size: 3\n }\n dim {\n size: "
"6\n }\n }\n }\n accelerator_exec_micros: 226\n "
"cpu_exec_micros: 4066\n total_accelerator_exec_micros: 226\n "
"total_cpu_exec_micros: 4066\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 14592\n residual_bytes: 768\n "
" output_bytes: 768\n total_peak_bytes: 14592\n total_residual_bytes: "
"768\n total_output_bytes: 768\n}\nchildren {\n name: \"Conv2D_1\"\n "
"exec_micros: 597\n requested_bytes: 9728\n total_exec_micros: 597\n "
"total_requested_bytes: 9728\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n float_ops: 4608\n "
"total_float_ops: 4608\n input_shapes {\n key: 0\n value {\n "
"dim {\n size: 2\n }\n dim {\n size: 3\n "
"}\n dim {\n size: 3\n }\n dim {\n size: "
"6\n }\n }\n }\n input_shapes {\n key: 1\n value {\n "
" dim {\n size: 2\n }\n dim {\n size: 2\n "
"}\n dim {\n size: 6\n }\n dim {\n size: "
"12\n }\n }\n }\n accelerator_exec_micros: 178\n "
"cpu_exec_micros: 419\n total_accelerator_exec_micros: 178\n "
"total_cpu_exec_micros: 419\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 8704\n residual_bytes: 512\n "
"output_bytes: 512\n total_peak_bytes: 8704\n total_residual_bytes: "
"512\n total_output_bytes: 512\n}\ntotal_float_ops: "
"10440\ntotal_accelerator_exec_micros: 404\ntotal_cpu_exec_micros: "
"4541\ntotal_run_count: 6\ntotal_definition_count: 35\ntotal_peak_bytes: "
"25856\ntotal_residual_bytes: 3840\ntotal_output_bytes: 4864\n",
&expected));
EXPECT_EQ(expected.DebugString(), root.DebugString());
EXPECT_EQ(root.DebugString(), TestToFromProto("scope", opts));
}
TEST_F(TFProfStatsTest, TestAccountShownNameOnly) {
Options opts(100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name", {".*"}, {".*"},
{""}, {"Conv2D_1"},
{""}, true,
{"params"}, "", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("scope", opts);
GraphNodeProto expected;
CHECK(protobuf::TextFormat::ParseFromString(
"name: \"_TFProfRoot\"\ntotal_exec_micros: 597\ntotal_requested_bytes: "
"9728\nchildren {\n name: \"Conv2D_1\"\n exec_micros: 597\n "
"requested_bytes: 9728\n total_exec_micros: 597\n "
"total_requested_bytes: 9728\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n float_ops: 4608\n "
"total_float_ops: 4608\n input_shapes {\n key: 0\n value {\n "
"dim {\n size: 2\n }\n dim {\n size: 3\n "
"}\n dim {\n size: 3\n }\n dim {\n size: "
"6\n }\n }\n }\n input_shapes {\n key: 1\n value {\n "
" dim {\n size: 2\n }\n dim {\n size: 2\n "
"}\n dim {\n size: 6\n }\n dim {\n size: "
"12\n }\n }\n }\n accelerator_exec_micros: 178\n "
"cpu_exec_micros: 419\n total_accelerator_exec_micros: 178\n "
"total_cpu_exec_micros: 419\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 8704\n residual_bytes: 512\n "
"output_bytes: 512\n total_peak_bytes: 8704\n total_residual_bytes: "
"512\n total_output_bytes: 512\n}\ntotal_float_ops: "
"4608\ntotal_accelerator_exec_micros: 178\ntotal_cpu_exec_micros: "
"419\ntotal_run_count: 1\ntotal_definition_count: 2\ntotal_peak_bytes: "
"8704\ntotal_residual_bytes: 512\ntotal_output_bytes: 512\n",
&expected));
EXPECT_EQ(expected.DebugString(), root.DebugString());
EXPECT_EQ(root.DebugString(), TestToFromProto("scope", opts));
}
TEST_F(TFProfStatsTest, TestShowTensorValue) {
Options opts(10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name", {".*"}, {".*"},
{""}, {"DW"}, {""}, false,
{"tensor_value"},
"", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("scope", opts);
GraphNodeProto expected;
CHECK(protobuf::TextFormat::ParseFromString(
"name: \"_TFProfRoot\"\ntotal_exec_micros: 4945\ntotal_requested_bytes: "
"30464\ntotal_parameters: 451\nchildren {\n name: \"DW\"\n "
"exec_micros: 2\n requested_bytes: 1280\n parameters: 162\n "
"total_exec_micros: 2\n total_requested_bytes: 1280\n "
"total_parameters: 162\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n tensor_value {\n dtype: "
"DT_FLOAT\n value_double: -0.000534315\n value_double: "
"-0.00089602\n value_double: -0.000417239\n value_double: "
"0.00041444\n value_double: 0.000780691\n value_double: "
"-0.000559057\n value_double: -0.000234623\n value_double: "
"0.00013393\n value_double: -0.00187574\n value_double: "
"0.000785666\n value_double: 0.000673294\n value_double: "
"0.000653368\n value_double: 0.000924489\n value_double: "
"-0.000318373\n value_double: -0.000385202\n value_double: "
"-7.92661e-05\n value_double: 2.70287e-05\n value_double: "
"0.00152302\n value_double: 8.04435e-05\n value_double: "
"-0.00058102\n value_double: 0.000244291\n value_double: "
"-0.000438045\n value_double: -0.000110199\n value_double: "
"0.000731663\n value_double: -0.0012326\n value_double: "
"0.00064065\n value_double: -0.00135203\n value_double: "
"-6.42784e-05\n value_double: -0.0011857\n value_double: "
"-0.000487383\n value_double: 3.41493e-05\n value_double: "
"-0.00158447\n value_double: 0.00168448\n value_double: "
"0.00160946\n value_double: -0.000600483\n value_double: "
"0.000650259\n value_double: -0.00109938\n value_double: "
"-0.000842166\n value_double: -0.0022673\n value_double: "
"-0.00101941\n value_double: -0.0011169\n value_double: "
"-0.0013557\n value_double: -1.46354e-05\n value_double: "
"-1.05487e-05\n value_double: -0.00092014\n value_double: "
"0.00272874\n value_double: 5.13942e-05\n value_double: "
"-0.00223472\n value_double: -0.000250875\n value_double: "
"-0.00180747\n value_double: -0.00234714\n value_double: "
"-0.00113523\n value_double: -0.00112635\n value_double: "
"-0.000843118\n value_double: -6.84256e-05\n value_double: "
"0.000243336\n value_double: 0.00119151\n value_double: "
"0.00131022\n value_double: 0.000768038\n value_double: "
"-8.90095e-05\n value_double: -0.000626427\n value_double: "
"-7.0617e-05\n value_double: -0.0021988\n value_double: "
"-0.00221544\n value_double: -0.000393118\n value_double: "
"0.000159464\n value_double: -0.000874746\n value_double: "
"-0.00131239\n value_double: -0.00135747\n value_double: "
"-0.00179753\n value_double: -0.00101005\n value_double: "
"-0.000107518\n value_double: -0.000616882\n value_double: "
"-0.000360923\n value_double: -0.00026896\n value_double: "
"-0.000142548\n value_double: 0.000577227\n value_double: "
"0.000536027\n value_double: 0.00126907\n value_double: "
"-0.00122712\n value_double: -3.60499e-05\n value_double: "
"0.000151026\n value_double: 0.00107658\n value_double: "
"0.00116475\n value_double: -0.00145312\n value_double: "
"0.000233326\n value_double: -0.00020198\n value_double: "
"0.00179029\n value_double: 0.00150048\n value_double: "
"-0.000884775\n value_double: 0.000409188\n value_double: "
"2.97176e-05\n value_double: -0.000506118\n value_double: "
"-2.33992e-05\n value_double: -0.00037212\n value_double: "
"0.000862773\n value_double: 0.00174046\n value_double: "
"-0.000240207\n value_double: 0.000663976\n value_double: "
"-0.00134747\n value_double: 0.00115585\n value_double: "
"0.000555869\n value_double: 0.00176722\n value_double: "
"-0.000518409\n value_double: 0.00101051\n value_double: "
"0.000129399\n value_double: -0.000916389\n value_double: "
"-0.00137693\n value_double: -0.00152412\n value_double: "
"7.32515e-05\n value_double: -0.000190811\n value_double: "
"-0.000158692\n value_double: -5.7791e-05\n value_double: "
"0.000671785\n value_double: -0.00152924\n value_double: "
"0.00117314\n value_double: -0.000384202\n value_double: "
"0.00176709\n value_double: -0.000181703\n value_double: "
"-0.000460994\n value_double: 0.000643716\n value_double: "
"4.76719e-05\n value_double: -0.00101037\n value_double: "
"0.00159621\n value_double: 0.00186758\n value_double: "
"0.00100001\n value_double: -0.00121831\n value_double: "
"0.00132231\n value_double: 0.0013511\n value_double: 0.00106659\n "
" value_double: 0.00018091\n value_double: 0.00155925\n "
"value_double: 4.26087e-05\n value_double: 0.000243264\n "
"value_double: -0.0017202\n value_double: -0.000218897\n "
"value_double: 0.00118693\n value_double: 0.00258909\n "
"value_double: 0.000641913\n value_double: -0.0013211\n "
"value_double: -0.00171943\n value_double: 0.00089151\n "
"value_double: -0.00114969\n value_double: -0.000196331\n "
"value_double: 0.00109994\n value_double: 0.000302616\n "
"value_double: 0.000675812\n value_double: 0.00112222\n "
"value_double: 0.000516456\n value_double: 0.00133357\n "
"value_double: 0.000298491\n value_double: 0.00145934\n "
"value_double: -0.00159102\n value_double: -0.000819061\n "
"value_double: 0.000120583\n value_double: 0.0006108\n "
"value_double: 0.00124132\n value_double: 0.000764859\n "
"value_double: 0.000374641\n value_double: -0.00149603\n "
"value_double: -0.000317367\n value_double: -0.000417829\n }\n "
"cpu_exec_micros: 2\n total_cpu_exec_micros: 2\n run_count: 1\n "
"total_run_count: 1\n total_definition_count: 10\n peak_bytes: 1280\n "
"residual_bytes: 1280\n output_bytes: 1280\n total_peak_bytes: 1280\n "
"total_residual_bytes: 1280\n total_output_bytes: "
"1280\n}\ntotal_float_ops: 10440\ntotal_accelerator_exec_micros: "
"404\ntotal_cpu_exec_micros: 4541\ntotal_run_count: "
"6\ntotal_definition_count: 35\ntotal_peak_bytes: "
"25856\ntotal_residual_bytes: 3840\ntotal_output_bytes: 4864\n",
&expected));
EXPECT_EQ(expected.DebugString(), root.DebugString());
}
}
} |
1,456 | cpp | tensorflow/tensorflow | tfprof_timeline | tensorflow/core/profiler/internal/tfprof_timeline.cc | tensorflow/core/profiler/internal/tfprof_timeline_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_TIMELINE_H_
#define TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_TIMELINE_H_
#include <map>
#include <memory>
#include <set>
#include <vector>
#include "absl/strings/str_cat.h"
#include "json/json.h"
#include "tensorflow/core/profiler/internal/tfprof_node_show.h"
namespace tensorflow {
namespace tfprof {
typedef std::map<string, string> Event;
class ChromeTraceFormatter {
public:
ChromeTraceFormatter() = default;
Json::Value CreateEvent(const string& ph, const string& category,
const string& name, int64_t pid, int64_t tid,
int64_t ts);
void EmitPID(const string& name, int64_t pid);
void EmitRegion(int64_t ts, int64_t duration, int64_t pid, int64_t tid,
const string& category, const string& name, Json::Value args);
void EmitFlowStart(const string& name, int64_t ts, int64_t pid, int64_t tid,
int64_t flow_id);
void EmitFlowEnd(const string& name, int64_t ts, int64_t pid, int64_t tid,
int64_t flow_id);
void EmitCounter(const string& category, const string& name, int64_t pid,
int64_t ts, const string& device, int64_t bytes,
const std::map<int64_t, std::vector<string>>& tensor_mem);
string Format();
private:
std::vector<Json::Value> events_;
std::vector<Json::Value> metadata_;
};
class Process {
public:
Process(const string& device, int64_t pid) : device(device), pid(pid) {}
std::vector<std::map<int64_t, int64_t>> lanes;
string device;
int64_t pid;
};
class TimeNode {
public:
TimeNode(Process* process, GraphNode* node, int64_t start_micros,
int64_t exec_micros)
: process(process),
node(node),
start_micros(start_micros),
exec_micros(exec_micros),
tid(-1) {}
virtual ~TimeNode() = default;
const string& name() { return node->name(); }
Process* process;
GraphNode* node;
int64_t start_micros;
int64_t exec_micros;
int64_t tid;
std::vector<TimeNode*> next_tnodes;
};
class MemoryTracker {
public:
class Device {
public:
std::map<string, std::map<int64_t, int64_t>> tensor_allocs;
std::map<int64_t, int64_t> allocations;
std::map<int64_t, int64_t> tracked_allocations;
};
void TrackNode(int64_t step, const GraphNode* node);
const std::map<string, Device>& devices() const { return devices_; }
private:
std::map<string, Device> devices_;
};
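// Timeline renders one profile step as a Chrome trace (chrome://tracing)
// file, laying out op execution on per-device lanes and emitting memory
// counters.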
class Timeline {
public:
Timeline(int64_t step, const string& outfile)
: step_(step), outfile_(outfile) {}
~Timeline() = default;
int64_t step() const { return step_; }
void SetStep(int64_t step) { step_ = step; }
void GenerateGraphTimeline(const std::vector<GraphNode*>& gnodes);
void GenerateScopeTimeline(const ScopeNode* node);
void GenerateCodeTimeline(const CodeNode* node);
private:
void TrackNode(const GraphNode* node) { mem_tracker_.TrackNode(step_, node); }
void OutputTimeline();
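  // Recursively emits one region per tree node, packing children back to back
  // starting at the parent's start time; the children's total time must not
  // exceed the parent's duration (checked below).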
template <typename Node>
void EmitTreeNode(const Node* node, int64_t start_time, int64_t duration,
int64_t depth, std::set<int64_t>* visited_depth) {
if (visited_depth->find(depth) == visited_depth->end()) {
chrome_formatter_.EmitPID(absl::StrCat("Scope:", depth), depth);
visited_depth->insert(depth);
}
Json::Value args(Json::objectValue);
args["name"] = Json::Value(node->name());
args["op"] = Json::Value(node->name());
chrome_formatter_.EmitRegion(start_time, duration, depth, 0, "Op",
node->name(), args);
int64_t total_micros = 0;
int64_t c_start_time = start_time;
for (const Node* child : node->show_children) {
int64_t total_exec_micros = child->proto().total_exec_micros();
if (total_exec_micros <= 0) {
continue;
}
EmitTreeNode(child, c_start_time, total_exec_micros, depth + 1,
visited_depth);
c_start_time += total_exec_micros;
total_micros += total_exec_micros;
}
CHECK(total_micros <= duration) << node->name() << " parent:" << duration
<< " children:" << total_micros;
}
void AllocateTimeNodes(GraphNode* gnode);
void AllocateLanes();
int64_t AllocatePID();
int64_t step_;
const string outfile_;
int64_t next_pid_ = 0;
MemoryTracker mem_tracker_;
ChromeTraceFormatter chrome_formatter_;
std::map<string, int64_t> device_pids_;
std::map<string, std::unique_ptr<Process>> process_;
std::map<int64_t, std::map<int64_t, std::map<int64_t, TimeNode*>>>
alloc_nodes_;
std::map<string, std::map<int64_t, std::unique_ptr<TimeNode>>> tnodes_;
};
}
}
#endif
#include "tensorflow/core/profiler/internal/tfprof_timeline.h"
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
namespace tensorflow {
namespace tfprof {
namespace {
int kMaxDisplayedMemNode = 10;
std::string GetTimeDevName(const std::string& dev) {
if (dev.find("stream") != dev.npos) {
return absl::StrCat("Op execution threads: ", dev);
} else {
return absl::StrCat("Op scheduling threads: ", dev);
}
}
std::string GetMemoryLaneName(const std::string& dev) {
return absl::StrCat("mem usage on:", dev);
}
}
Json::Value ChromeTraceFormatter::CreateEvent(const string& ph,
const string& category,
const string& name, int64_t pid,
int64_t tid, int64_t ts) {
Json::Value event(Json::objectValue);
event["ph"] = Json::Value(ph);
event["cat"] = Json::Value(category);
event["name"] = Json::Value(name);
event["pid"] = Json::Int64(pid);
event["tid"] = Json::Int64(tid);
event["ts"] = Json::Int64(ts);
return event;
}
void ChromeTraceFormatter::EmitPID(const string& name, int64_t pid) {
Json::Value event(Json::objectValue);
event["name"] = Json::Value("process_name");
event["ph"] = Json::Value("M");
event["pid"] = Json::Int64(pid);
Json::Value args(Json::objectValue);
args["name"] = Json::Value(name);
event["args"] = args;
metadata_.push_back(event);
}
void ChromeTraceFormatter::EmitRegion(int64_t ts, int64_t duration, int64_t pid,
int64_t tid, const string& category,
const string& name, Json::Value args) {
Json::Value event = CreateEvent("X", category, name, pid, tid, ts);
event["dur"] = Json::Int64(duration);
event["args"] = std::move(args);
metadata_.push_back(event);
}
void ChromeTraceFormatter::EmitFlowStart(const string& name, int64_t ts,
int64_t pid, int64_t tid,
int64_t flow_id) {
Json::Value event = CreateEvent("s", "DataFlow", name, pid, tid, ts);
event["id"] = Json::Int64(flow_id);
events_.push_back(event);
}
void ChromeTraceFormatter::EmitFlowEnd(const string& name, int64_t ts,
int64_t pid, int64_t tid,
int64_t flow_id) {
Json::Value event = CreateEvent("t", "DataFlow", name, pid, tid, ts);
event["id"] = Json::Int64(flow_id);
events_.push_back(event);
}
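// Emits two counter events per timestamp: total allocator bytes in use, and a
// breakdown of the largest tensor allocations (up to kMaxDisplayedMemNode).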
void ChromeTraceFormatter::EmitCounter(
const string& category, const string& name, int64_t pid, int64_t ts,
const string& device, int64_t bytes,
const std::map<int64_t, std::vector<string>>& tensor_mem) {
Json::Value event = CreateEvent("C", category, "Allocated Bytes", pid, 0, ts);
Json::Value args(Json::objectValue);
args["Allocator Bytes in Use"] = Json::Int64(bytes);
event["args"] = args;
events_.push_back(event);
Json::Value event2 =
CreateEvent("C", category, "Top Allocations", pid + 1, 0, ts);
Json::Value args2(Json::objectValue);
for (int i = 1; i < kMaxDisplayedMemNode; ++i) {
args2[absl::StrFormat("Top Allocation %02d", i)] = Json::Value("N/A");
}
int count = 0;
for (auto it = tensor_mem.rbegin(); it != tensor_mem.rend(); ++it) {
for (const string& t : it->second) {
if (bytes < it->first || count >= kMaxDisplayedMemNode) {
break;
}
args2[absl::StrFormat("Top Allocation %02d", count)] =
Json::Value(absl::StrCat(it->first / 1000000.0, " MB from ", t));
++count;
bytes -= it->first;
}
}
args2[std::string("Not Displayed")] =
Json::Value(absl::StrFormat("%.2f MB", bytes / 1000000.0));
event2["args"] = args2;
events_.push_back(event2);
}
string ChromeTraceFormatter::Format() {
Json::Value trace;
trace["traceEvents"] = Json::Value(Json::arrayValue);
for (const Json::Value& v : metadata_) {
trace["traceEvents"].append(v);
}
for (const Json::Value& v : events_) {
trace["traceEvents"].append(v);
}
Json::FastWriter writer;
string trace_str = writer.write(trace);
if (trace_str.length() > 200 * 1024 * 1024) {
absl::FPrintF(stderr,
"Trace file is over 200MB. Chrome might not be able to "
"display it. Consider to use filters (e.g. -min_micros "
"> 1000 or -op_type .*gpu:0.* to reduce the size.\n");
}
return trace_str;
}
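// Accumulates per-device allocation statistics for a trackable node at the
// given step: per-tensor running allocation totals and allocator bytes in use.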
void MemoryTracker::TrackNode(int64_t step, const GraphNode* node) {
if (!node->Trackable(step)) {
return;
}
Device& dev = devices_[node->node->canonical_device()];
std::map<int64_t, int64_t> allocs;
for (const auto& alloc : node->node->allocations(step)) {
allocs[alloc.alloc_micros()] += alloc.alloc_bytes();
dev.tracked_allocations[alloc.alloc_micros()] += alloc.alloc_bytes();
}
dev.tracked_allocations[0] += node->node->accelerator_persistent_bytes();
allocs[0] += node->node->accelerator_persistent_bytes();
int64_t last = 0;
std::map<int64_t, int64_t>& aggregate_allocs =
dev.tensor_allocs[node->name()];
for (auto it = allocs.begin(); it != allocs.end(); ++it) {
last += it->second;
aggregate_allocs[it->first] = last;
}
for (const auto& bytes_in_use : node->node->allocator_bytes_in_use(step)) {
if (bytes_in_use.first <= 0) continue;
dev.allocations[bytes_in_use.first] = bytes_in_use.second;
}
}
void Timeline::AllocateTimeNodes(GraphNode* gnode) {
if (gnode->Trackable(step_)) {
TrackNode(gnode);
const TFGraphNode* node = gnode->node;
for (const auto& kernel_execs : node->op_execs(step_)) {
const string& device = kernel_execs.first;
if (process_.find(device) == process_.end()) {
int64_t pid = AllocatePID();
process_[device] = std::make_unique<Process>(device, pid);
chrome_formatter_.EmitPID(GetTimeDevName(device), pid);
}
Process* p = process_[device].get();
for (const auto& exec : kernel_execs.second) {
int64_t start_micros = exec.first;
int64_t exec_micros = exec.second;
if (tnodes_[device].find(start_micros) == tnodes_[device].end()) {
tnodes_[device][start_micros] =
std::make_unique<TimeNode>(p, gnode, start_micros, exec_micros);
}
}
}
}
for (GraphNode* n : gnode->show_children) {
AllocateTimeNodes(n);
}
}
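// Builds the full graph timeline: allocates time nodes per device, connects
// ops whose inputs executed on a different device via flow events, assigns
// lanes, and emits memory counters for non-CPU devices before writing the
// trace file.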
void Timeline::GenerateGraphTimeline(const std::vector<GraphNode*>& gnodes) {
for (GraphNode* gnode : gnodes) {
AllocateTimeNodes(gnode);
}
for (auto& process : tnodes_) {
if (!IsCanonicalDevice(process.first)) continue;
for (auto& tn : process.second) {
TimeNode* tnode = tn.second.get();
for (GraphNode* inp : tnode->node->children) {
if (!inp->account || !inp->Trackable(step_)) {
continue;
}
for (const auto& execs : inp->node->cpu_execs(step_)) {
if (!IsCanonicalDevice(execs.first)) continue;
if (process.first == execs.first) {
continue;
}
for (const auto& exec : execs.second) {
int64_t start_micros = exec.first;
auto cprocess = tnodes_.find(execs.first);
if (cprocess == tnodes_.end()) continue;
auto ctn = cprocess->second.find(start_micros);
if (ctn == cprocess->second.end()) continue;
ctn->second->next_tnodes.push_back(tnode);
}
}
}
}
}
AllocateLanes();
absl::FPrintF(stdout, "generating trace file.\n");
int64_t flow_id = 1;
for (const auto& process : alloc_nodes_) {
for (const auto& lane : process.second) {
for (const auto& node : lane.second) {
TimeNode* tnode = node.second;
Json::Value args(Json::objectValue);
args["name"] = Json::Value(tnode->name());
chrome_formatter_.EmitRegion(node.first, tnode->exec_micros,
process.first, lane.first, "Op",
tnode->name(), args);
for (TimeNode* next_tnode : node.second->next_tnodes) {
chrome_formatter_.EmitFlowStart(
tnode->name() + "_flow", tnode->start_micros + tnode->exec_micros,
process.first, lane.first, flow_id);
chrome_formatter_.EmitFlowEnd(
tnode->name() + "_flow", next_tnode->start_micros,
next_tnode->process->pid, next_tnode->tid, flow_id);
flow_id += 1;
}
}
}
}
for (const auto& dev : mem_tracker_.devices()) {
if (IsPlacedOnCPU(dev.first)) {
continue;
}
int64_t pid = AllocatePID();
chrome_formatter_.EmitPID(GetMemoryLaneName(dev.first), pid);
int64_t pid2 = AllocatePID();
chrome_formatter_.EmitPID(GetMemoryLaneName(dev.first) + " allocations",
pid2);
const MemoryTracker::Device& device = dev.second;
int64_t max_bytes_in_use = 0;
int64_t cur_bytes_in_use = 0;
int64_t last_point = 0;
for (const auto& alloc : device.allocations) {
cur_bytes_in_use = alloc.second;
max_bytes_in_use = std::max(max_bytes_in_use, cur_bytes_in_use);
int64_t ts = alloc.first;
if (ts - last_point < 100) continue;
last_point = ts;
std::map<int64_t, std::vector<string>> tensor_mem;
for (const auto& tensor_alloc_it : dev.second.tensor_allocs) {
const auto& tensor_alloc = tensor_alloc_it.second;
auto it = tensor_alloc.lower_bound(ts);
if (it != tensor_alloc.begin()) {
--it;
}
if (it->second > 0) {
tensor_mem[it->second].push_back(tensor_alloc_it.first);
}
}
chrome_formatter_.EmitCounter("Memory", "Memory Series", pid, ts,
dev.first, cur_bytes_in_use, tensor_mem);
}
if (IsPlacedOnAccelerator(dev.first)) {
absl::FPrintF(stdout, "%s peak memory: %.2f MB\n", dev.first,
max_bytes_in_use / 1000000.0);
}
}
OutputTimeline();
}
void Timeline::GenerateScopeTimeline(const ScopeNode* node) {
std::set<int64_t> visited_depth;
EmitTreeNode(node, 0, node->proto().total_exec_micros(), 0, &visited_depth);
OutputTimeline();
}
void Timeline::GenerateCodeTimeline(const CodeNode* node) {
std::set<int64_t> visited_depth;
EmitTreeNode(node, 0, node->proto().total_exec_micros(), 0, &visited_depth);
OutputTimeline();
}
void Timeline::OutputTimeline() {
std::string outfile = absl::StrFormat("%s_%d", outfile_, step());
Status s =
WriteStringToFile(Env::Default(), outfile, chrome_formatter_.Format());
if (!s.ok()) {
absl::FPrintF(stderr, "Failed to write timeline file: %s\nError: %s\n",
outfile, s.ToString());
return;
}
absl::FPrintF(stdout,
"\n******************************************************\n");
absl::FPrintF(stdout,
"Timeline file is written to %s.\n"
"Open a Chrome browser, enter URL chrome:
"load the timeline file.",
outfile);
absl::FPrintF(stdout,
"\n******************************************************\n");
fflush(stdout);
}
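// Greedily assigns each time node to the first lane in its process whose most
// recent event ends no later than the node's start time, adding a new lane
// when none is free.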
void Timeline::AllocateLanes() {
for (auto& process : tnodes_) {
Process* p = process_[process.first].get();
for (auto& tnode : process.second) {
int64_t start_time = tnode.second->start_micros;
int64_t end_time = tnode.second->start_micros + tnode.second->exec_micros;
int64_t l = -1;
for (int64_t i = 0, end = p->lanes.size(); i < end; ++i) {
const auto& lane = p->lanes[i];
l = i;
for (auto cur_it = lane.rbegin(); cur_it != lane.rend(); ++cur_it) {
if (cur_it->second > start_time) {
l = -1;
break;
}
if (start_time > cur_it->second) {
break;
}
}
if (l >= 0) {
break;
}
}
if (l < 0) {
l = p->lanes.size();
std::map<int64_t, int64_t> nlane;
nlane[start_time] = end_time;
p->lanes.push_back(nlane);
} else {
p->lanes[l][start_time] = end_time;
}
tnode.second->tid = l;
alloc_nodes_[p->pid][l][start_time] = tnode.second.get();
}
}
}
int64_t Timeline::AllocatePID() {
int64_t cur_pid = next_pid_;
next_pid_ += 1;
return cur_pid;
}
}
} | #include <memory>
#include <utility>
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/internal/tfprof_constants.h"
#include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_log.pb.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
class TFProfTimelineTest : public ::testing::Test {
protected:
TFProfTimelineTest() {
string graph_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/graph.pbtxt");
std::unique_ptr<tensorflow::GraphDef> graph_pb(new tensorflow::GraphDef());
TF_CHECK_OK(
ReadProtoFile(Env::Default(), graph_path, graph_pb.get(), false));
std::unique_ptr<tensorflow::RunMetadata> run_meta_pb(
new tensorflow::RunMetadata());
string run_meta_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/run_meta");
TF_CHECK_OK(
ReadProtoFile(Env::Default(), run_meta_path, run_meta_pb.get(), true));
tf_stats_ = std::make_unique<TFStats>(
std::move(graph_pb), std::move(run_meta_pb), nullptr, nullptr);
tf_stats_->BuildAllViews();
}
std::unique_ptr<TFStats> tf_stats_;
};
TEST_F(TFProfTimelineTest, GraphView) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(10000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "name",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "timeline",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("graph", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file + "_0", &dump_str));
EXPECT_EQ(16556121177519539380ull, Hash64(dump_str));
}
TEST_F(TFProfTimelineTest, ScopeView) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "name",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "timeline",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("scope", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file + "_0", &dump_str));
EXPECT_EQ(17545174915963890413ull, Hash64(dump_str));
}
}
} |
1,457 | cpp | tensorflow/tensorflow | tfprof_show | tensorflow/core/profiler/internal/tfprof_show.cc | tensorflow/core/profiler/internal/tfprof_show_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_SHOW_H_
#define TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_SHOW_H_
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/c/checkpoint_reader.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/profiler/internal/tfprof_constants.h"
#include "tensorflow/core/profiler/internal/tfprof_node.h"
#include "tensorflow/core/profiler/internal/tfprof_node_show.h"
#include "tensorflow/core/profiler/internal/tfprof_tensor.h"
#include "tensorflow/core/profiler/internal/tfprof_timeline.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
class TFShow {
public:
explicit TFShow(checkpoint::CheckpointReader* ckpt_reader)
: ckpt_reader_(ckpt_reader) {}
virtual ~TFShow() = default;
virtual void AddNode(TFGraphNode* node) = 0;
virtual void Build() = 0;
virtual const GraphNodeProto& Show(const string& prefix,
const Options& opts) final;
protected:
virtual const ShowNode* ShowInternal(const Options& opts,
Timeline* timeline) = 0;
bool LookUpCheckPoint(const string& name,
std::unique_ptr<TFProfTensor>* tensor);
virtual bool ShouldShowIfExtra(const ShowNode* node, const Options& opts,
int depth) const {
return true;
}
bool ShouldShow(const ShowNode* node, const Options& opts, int depth) const;
bool ShouldTrim(const ShowNode* node,
const std::vector<string>& regexes) const;
bool ReAccount(ShowNode* node, const Options& opts);
string FormatNode(ShowNode* node, const Options& opts) const;
string FormatNodeMemory(ShowNode* node, int64_t bytes,
int64_t total_bytes) const;
string FormatLegend(const Options& opts) const;
template <typename T>
std::vector<T*> SortNodes(const std::vector<T*>& nodes, const Options& opts) {
if (opts.order_by.empty() || nodes.empty()) {
return nodes;
}
std::vector<T*> sorted_nodes = nodes;
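    // kTFProfRoot always sorts first; all other nodes are ordered by the
    // metric selected in opts.order_by (larger values first), with ascending
    // node name as the fallback comparison.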
std::stable_sort(sorted_nodes.begin(), sorted_nodes.end(),
[&opts](const T* n1, const T* n2) {
if (n1->name() == kTFProfRoot) return true;
if (n2->name() == kTFProfRoot) return false;
bool name_cmp = n1->name() < n2->name();
if (opts.order_by == kOrderBy[0]) {
return name_cmp;
} else if (opts.order_by == kOrderBy[1]) {
return n1->proto().total_requested_bytes() >
n2->proto().total_requested_bytes();
} else if (opts.order_by == kOrderBy[2]) {
return n1->proto().total_peak_bytes() >
n2->proto().total_peak_bytes();
} else if (opts.order_by == kOrderBy[3]) {
return n1->proto().total_residual_bytes() >
n2->proto().total_residual_bytes();
} else if (opts.order_by == kOrderBy[4]) {
return n1->proto().total_output_bytes() >
n2->proto().total_output_bytes();
} else if (opts.order_by == kOrderBy[5]) {
return n1->proto().total_exec_micros() >
n2->proto().total_exec_micros();
} else if (opts.order_by == kOrderBy[6]) {
return n1->proto().total_accelerator_exec_micros() >
n2->proto().total_accelerator_exec_micros();
} else if (opts.order_by == kOrderBy[7]) {
return n1->proto().total_cpu_exec_micros() >
n2->proto().total_cpu_exec_micros();
} else if (opts.order_by == kOrderBy[8]) {
return n1->proto().total_parameters() >
n2->proto().total_parameters();
} else if (opts.order_by == kOrderBy[9]) {
return n1->proto().total_float_ops() >
n2->proto().total_float_ops();
}
return name_cmp;
});
return sorted_nodes;
}
checkpoint::CheckpointReader* ckpt_reader_;
};
template <typename T>
string FormatTotalExecTime(const T* node, const Options& opts) {
string time = FormatTime(node->proto().total_exec_micros());
if (node->account) {
time = FormatTime(node->proto().exec_micros()) + "/" + time;
} else {
time = "--/" + time;
}
return time;
}
template <typename T>
string FormatCPUExecTime(const T* node, const Options& opts) {
string time = FormatTime(node->proto().total_cpu_exec_micros());
if (node->account) {
time = FormatTime(node->proto().cpu_exec_micros()) + "/" + time;
} else {
time = "--/" + time;
}
return time;
}
template <typename T>
string FormatAcceleratorExecTime(const T* node, const Options& opts) {
string time = FormatTime(node->proto().total_accelerator_exec_micros());
if (node->account) {
time = FormatTime(node->proto().accelerator_exec_micros()) + "/" + time;
} else {
time = "--/" + time;
}
return time;
}
}
}
#endif
#include "tensorflow/core/profiler/internal/tfprof_show.h"
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/regexp.h"
namespace tensorflow {
namespace tfprof {
const GraphNodeProto& TFShow::Show(const string& prefix, const Options& opts) {
if (opts.output_type == kOutput[0]) {
Timeline timeline(opts.step, opts.output_options.at(kTimelineOpts[0]));
return ShowInternal(opts, &timeline)->proto();
} else {
const ShowNode* ret = ShowInternal(opts, nullptr);
if (opts.output_type == kOutput[1]) {
absl::PrintF("%s", (prefix + ret->formatted_str));
fflush(stdout);
} else if (opts.output_type == kOutput[2]) {
Status s = WriteStringToFile(Env::Default(),
opts.output_options.at(kFileOpts[0]),
prefix + ret->formatted_str);
if (!s.ok()) {
absl::FPrintF(stderr, "%s\n", s.ToString());
}
} else if (opts.output_type == kOutput[3] ||
opts.output_type == kOutput[4]) {
} else {
absl::FPrintF(stderr, "Unknown output type: %s\n", opts.output_type);
}
return ret->proto();
}
}
bool TFShow::LookUpCheckPoint(const string& name,
std::unique_ptr<TFProfTensor>* tensor) {
if (name == kTFProfRoot || !ckpt_reader_ || !tensor) {
return false;
}
std::unique_ptr<Tensor> out_tensor;
TF_Status* status = TF_NewStatus();
ckpt_reader_->GetTensor(name, &out_tensor, status);
if (TF_GetCode(status) != TF_OK) {
absl::FPrintF(stderr, "%s\n", TF_Message(status));
TF_DeleteStatus(status);
return false;
}
*tensor = std::make_unique<TFProfTensor>(std::move(out_tensor));
TF_DeleteStatus(status);
return true;
}
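// A node is displayed only if it clears every min_* threshold, lies within
// max_depth, passes ShouldShowIfExtra, matches at least one show_name_regex,
// and matches none of the hide_name_regexes. The profiler root is always
// shown.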
bool TFShow::ShouldShow(const ShowNode* node, const Options& opts,
int depth) const {
if (node->name() == kTFProfRoot) return true;
if (node->proto().total_requested_bytes() < opts.min_bytes ||
node->proto().total_peak_bytes() < opts.min_peak_bytes ||
node->proto().total_residual_bytes() < opts.min_residual_bytes ||
node->proto().total_output_bytes() < opts.min_output_bytes ||
node->proto().total_exec_micros() < opts.min_micros ||
node->proto().total_accelerator_exec_micros() <
opts.min_accelerator_micros ||
node->proto().total_cpu_exec_micros() < opts.min_cpu_micros ||
node->proto().parameters() < opts.min_params ||
node->proto().float_ops() < opts.min_float_ops ||
node->proto().run_count() < opts.min_occurrence ||
depth > opts.max_depth || !ShouldShowIfExtra(node, opts, depth)) {
return false;
}
bool show = false;
if (opts.show_name_regexes.size() == 1 && opts.show_name_regexes[0] == ".*") {
show = true;
} else {
for (const string& regex : opts.show_name_regexes) {
if (RE2::FullMatch(node->name(), regex)) {
show = true;
break;
}
}
}
if (!show) return false;
for (const string& regex : opts.hide_name_regexes) {
if (RE2::FullMatch(node->name(), regex)) return false;
}
return true;
}
bool TFShow::ShouldTrim(const ShowNode* node,
const std::vector<string>& regexes) const {
for (const string& regex : regexes) {
if (RE2::FullMatch(node->name(), regex)) {
return true;
}
}
return false;
}
bool TFShow::ReAccount(ShowNode* node, const Options& opts) {
node->ReInit(opts.step);
if (opts.account_type_regexes.size() == 1 &&
opts.account_type_regexes[0] == ".*") {
return true;
}
for (const string& regex : opts.account_type_regexes) {
for (const string& type : node->node->op_types()) {
if (RE2::FullMatch(type, regex)) {
return true;
}
}
}
return false;
}
string TFShow::FormatNodeMemory(ShowNode* node, int64_t bytes,
int64_t total_bytes) const {
string memory = FormatMemory(total_bytes);
if (node->account) {
memory = FormatMemory(bytes) + "/" + memory;
} else {
memory = "--/" + memory;
}
return memory;
}
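// Builds the per-node display string. Fields are appended in the same order
// as the column headers emitted by FormatLegend() below, so the two must be
// kept in sync.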
string TFShow::FormatNode(ShowNode* node, const Options& opts) const {
std::vector<string> info;
if (opts.select.find(kShown[2]) != opts.select.end()) {
const string shape = FormatShapes(node->node->shape());
if (!shape.empty()) {
info.push_back(shape);
}
string params = FormatNumber(node->proto().total_parameters()) + " params";
if (node->account) {
params = FormatNumber(node->proto().parameters()) + "/" + params;
} else {
params = "--/" + params;
}
info.push_back(params);
}
if (opts.select.find(kShown[3]) != opts.select.end()) {
string fops = FormatNumber(node->proto().total_float_ops()) + " flops";
if (node->account) {
fops = FormatNumber(node->proto().float_ops()) + "/" + fops;
} else {
fops = "--/" + fops;
}
info.push_back(fops);
}
if (opts.select.find(kShown[0]) != opts.select.end()) {
info.push_back(FormatNodeMemory(node, node->proto().requested_bytes(),
node->proto().total_requested_bytes()));
}
if (opts.select.find(kShown[11]) != opts.select.end()) {
info.push_back(FormatNodeMemory(node, node->proto().peak_bytes(),
node->proto().total_peak_bytes()));
}
if (opts.select.find(kShown[12]) != opts.select.end()) {
info.push_back(FormatNodeMemory(node, node->proto().residual_bytes(),
node->proto().total_residual_bytes()));
}
if (opts.select.find(kShown[13]) != opts.select.end()) {
info.push_back(FormatNodeMemory(node, node->proto().output_bytes(),
node->proto().total_output_bytes()));
}
if (opts.select.find(kShown[1]) != opts.select.end()) {
info.push_back(FormatTotalExecTime(node, opts));
info.push_back(FormatAcceleratorExecTime(node, opts));
info.push_back(FormatCPUExecTime(node, opts));
}
if (opts.select.find(kShown[9]) != opts.select.end() &&
opts.select.find(kShown[1]) == opts.select.end()) {
info.push_back(FormatAcceleratorExecTime(node, opts));
}
if (opts.select.find(kShown[10]) != opts.select.end() &&
opts.select.find(kShown[1]) == opts.select.end()) {
info.push_back(FormatCPUExecTime(node, opts));
}
if (opts.select.find(kShown[5]) != opts.select.end()) {
if (node->proto().devices_size() > 0) {
info.push_back(absl::StrJoin(node->proto().devices(), "|"));
}
}
if (opts.select.find(kShown[6]) != opts.select.end()) {
const std::set<string>& op_types = node->node->op_types();
info.push_back(absl::StrJoin(op_types, "|"));
}
if (opts.select.find(kShown[7]) != opts.select.end()) {
string run = FormatNumber(node->proto().total_run_count());
if (node->account) {
run = FormatNumber(node->proto().run_count()) + "/" + run;
} else {
run = "--/" + run;
}
string definition = FormatNumber(node->proto().total_definition_count());
if (node->account) {
definition = "1/" + definition;
} else {
definition = "--/" + definition;
}
info.push_back(run + "|" + definition);
}
if (opts.select.find(kShown[8]) != opts.select.end()) {
std::vector<string> shape_vec;
for (const auto& s : node->node->input_shapes()) {
if (s.second.empty()) {
shape_vec.push_back(absl::StrFormat("%d:unknown", s.first));
} else {
shape_vec.push_back(
absl::StrFormat("%d:%s", s.first, absl::StrJoin(s.second, "x")));
}
}
info.push_back(absl::StrJoin(shape_vec, "|"));
}
return absl::StrFormat("%s (%s)", node->name(), absl::StrJoin(info, ", "));
}
string TFShow::FormatLegend(const Options& opts) const {
std::vector<string> legends;
if (opts.select.find(kShown[2]) != opts.select.end()) {
legends.push_back("# parameters");
}
if (opts.select.find(kShown[3]) != opts.select.end()) {
legends.push_back("# float_ops");
}
if (opts.select.find(kShown[0]) != opts.select.end()) {
legends.push_back("requested bytes");
}
if (opts.select.find(kShown[11]) != opts.select.end()) {
legends.push_back("peak bytes");
}
if (opts.select.find(kShown[12]) != opts.select.end()) {
legends.push_back("residual bytes");
}
if (opts.select.find(kShown[13]) != opts.select.end()) {
legends.push_back("output bytes");
}
if (opts.select.find(kShown[1]) != opts.select.end()) {
legends.push_back("total execution time");
legends.push_back("accelerator execution time");
legends.push_back("cpu execution time");
}
if (opts.select.find(kShown[9]) != opts.select.end() &&
opts.select.find(kShown[1]) == opts.select.end()) {
legends.push_back("accelerator execution time");
}
if (opts.select.find(kShown[10]) != opts.select.end() &&
opts.select.find(kShown[1]) == opts.select.end()) {
legends.push_back("cpu execution time");
}
if (opts.select.find(kShown[5]) != opts.select.end()) {
legends.push_back("assigned devices");
}
if (opts.select.find(kShown[6]) != opts.select.end()) {
legends.push_back("op types");
}
if (opts.select.find(kShown[7]) != opts.select.end()) {
legends.push_back("op count (run|defined)");
}
if (opts.select.find(kShown[8]) != opts.select.end()) {
legends.push_back("input shapes");
}
return absl::StrFormat("node name | %s\n", absl::StrJoin(legends, " | "));
}
}
} | #include <memory>
#include <utility>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/internal/tfprof_constants.h"
#include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_log.pb.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
string CheckAndRemoveDoc(const string& doc) {
auto pos = doc.find("Profile:");
CHECK(pos != doc.npos);
return doc.substr(pos + 9);
}
class TFProfShowTest : public ::testing::Test {
protected:
TFProfShowTest() {
string graph_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/graph.pbtxt");
std::unique_ptr<tensorflow::GraphDef> graph_pb(new tensorflow::GraphDef());
TF_CHECK_OK(
ReadProtoFile(Env::Default(), graph_path, graph_pb.get(), false));
std::unique_ptr<tensorflow::RunMetadata> run_meta_pb(
new tensorflow::RunMetadata());
string run_meta_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/run_meta");
TF_CHECK_OK(
ReadProtoFile(Env::Default(), run_meta_path, run_meta_pb.get(), true));
std::unique_ptr<OpLogProto> op_log_pb(new OpLogProto());
string op_log_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/tfprof_log");
TF_CHECK_OK(ReadBinaryProto(Env::Default(), op_log_path, op_log_pb.get()));
string ckpt_path = io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/ckpt");
TF_Status* status = TF_NewStatus();
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader(
new checkpoint::CheckpointReader(ckpt_path, status));
CHECK(TF_GetCode(status) == TF_OK);
TF_DeleteStatus(status);
tf_stats_ =
std::make_unique<TFStats>(std::move(graph_pb), std::move(run_meta_pb),
std::move(op_log_pb), std::move(ckpt_reader));
tf_stats_->BuildAllViews();
}
string TestToFromProto(const string& cmd, const Options& opts,
bool show_multi_node = false) {
string profile_file = io::JoinPath(testing::TmpDir(), "profile");
tf_stats_->WriteProfile(profile_file);
TFStats new_stats(profile_file, nullptr);
new_stats.BuildAllViews();
if (show_multi_node) {
new_stats.ShowMultiGraphNode(cmd, opts);
} else {
new_stats.ShowGraphNode(cmd, opts);
}
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(),
opts.output_options.at("outfile"), &dump_str));
return dump_str;
}
std::unique_ptr<TFStats> tf_stats_;
};
TEST_F(TFProfShowTest, DumpScopeMode) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(
5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name",
{"VariableV2"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "peak_bytes", "residual_bytes", "output_bytes",
"micros", "accelerator_micros", "cpu_micros", "float_ops"},
"file", {{"outfile", dump_file}});
tf_stats_->ShowGraphNode("scope", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file, &dump_str));
EXPECT_EQ(
"node name | # parameters | # float_ops | requested bytes | peak bytes | "
"residual bytes | output bytes | total execution time | accelerator "
"execution time | cpu execution time\n_TFProfRoot (--/451 params, --/0 "
"flops, --/2.56KB, --/2.56KB, --/2.56KB, --/2.56KB, --/13us, --/0us, "
"--/13us)\n DW (3x3x3x6, 162/162 params, 0/0 flops, 1.28KB/1.28KB, "
"1.28KB/1.28KB, 1.28KB/1.28KB, 1.28KB/1.28KB, 2us/2us, 0us/0us, "
"2us/2us)\n DW2 (2x2x6x12, 288/288 params, 0/0 flops, 1.28KB/1.28KB, "
"1.28KB/1.28KB, 1.28KB/1.28KB, 1.28KB/1.28KB, 11us/11us, 0us/0us, "
"11us/11us)\n ScalarW (1, 1/1 params, 0/0 flops, 0B/0B, 0B/0B, 0B/0B, "
"0B/0B, 0us/0us, 0us/0us, 0us/0us)\n",
CheckAndRemoveDoc(dump_str));
EXPECT_EQ(dump_str, TestToFromProto("scope", opts));
}
TEST_F(TFProfShowTest, DumpAcceleratorAndCPUMicros) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "cpu_micros",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"accelerator_micros", "cpu_micros"}, "file",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("scope", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file, &dump_str));
EXPECT_EQ(
"node name | accelerator execution time | cpu execution "
"time\n_TFProfRoot (--/404us, --/4.54ms)\n Conv2D (226us/226us, "
"4.07ms/4.07ms)\n Conv2D_1 (178us/178us, 419us/419us)\n "
"_retval_Conv2D_1_0_0 (0us/0us, 41us/41us)\n DW2 (0us/0us, 11us/11us)\n "
" DW2/Assign (0us/0us, 0us/0us)\n DW2/Initializer (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal (0us/0us, 0us/0us)\n "
" DW2/Initializer/random_normal/RandomStandardNormal (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal/mean (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal/mul (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal/shape (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal/stddev (0us/0us, "
"0us/0us)\n DW2/read (0us/0us, 0us/0us)\n DW (0us/0us, 2us/2us)\n "
"DW/Assign (0us/0us, 0us/0us)\n DW/Initializer (0us/0us, 0us/0us)\n "
" DW/Initializer/random_normal (0us/0us, 0us/0us)\n "
"DW/Initializer/random_normal/RandomStandardNormal (0us/0us, 0us/0us)\n "
" DW/Initializer/random_normal/mean (0us/0us, 0us/0us)\n "
"DW/Initializer/random_normal/mul (0us/0us, 0us/0us)\n "
"DW/Initializer/random_normal/shape (0us/0us, 0us/0us)\n "
"DW/Initializer/random_normal/stddev (0us/0us, 0us/0us)\n DW/read "
"(0us/0us, 0us/0us)\n zeros (0us/0us, 2us/2us)\n ScalarW (0us/0us, "
"0us/0us)\n ScalarW/Assign (0us/0us, 0us/0us)\n "
"ScalarW/Initializer (0us/0us, 0us/0us)\n "
"ScalarW/Initializer/random_normal (0us/0us, 0us/0us)\n "
"ScalarW/Initializer/random_normal/RandomStandardNormal (0us/0us, "
"0us/0us)\n ScalarW/Initializer/random_normal/mean (0us/0us, "
"0us/0us)\n ScalarW/Initializer/random_normal/mul (0us/0us, "
"0us/0us)\n ScalarW/Initializer/random_normal/shape (0us/0us, "
"0us/0us)\n ScalarW/Initializer/random_normal/stddev (0us/0us, "
"0us/0us)\n ScalarW/read (0us/0us, 0us/0us)\n init (0us/0us, "
"0us/0us)\n",
CheckAndRemoveDoc(dump_str));
EXPECT_EQ(dump_str, TestToFromProto("scope", opts));
}
TEST_F(TFProfShowTest, DumpOpMode) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(
5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, "params",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops", "occurrence", "input_shapes"},
"file", {{"outfile", dump_file}});
tf_stats_->ShowMultiGraphNode("op", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file, &dump_str));
EXPECT_EQ(
"nodename|requestedbytes|totalexecutiontime|acceleratorexecutiontime|"
"cpuexecutiontime|#parameters|#float_ops|opoccurrence(run|defined)|"
"inputshapes\nVariableV22.56KB(100.00%,8.40%),13us(100.00%,0.26%),0us("
"100.00%,0.00%),13us(100.00%,0.29%),451params(100.00%,100.00%),0float_"
"ops(100.00%,0.00%),2|3\n\ninput_type:\t(run*2|defined*3)\texec_time:"
"13us\n\nAdd0B(0.00%,0.00%),0us(99.74%,0.00%),0us(100.00%,0.00%),0us(99."
"71%,0.00%),0params(0.00%,0.00%),0float_ops(100.00%,0.00%),0|3\n\ninput_"
"type:0:1,\t1:1\t(run*0|defined*1)\texec_time:0us\ninput_type:0:2x2x6x12,"
"\t1:1\t(run*0|defined*1)\texec_time:0us\ninput_type:0:3x3x3x6,\t1:1\t("
"run*0|defined*1)\texec_time:0us\n\nAssign0B(0.00%,0.00%),0us(99.74%,0."
"00%),0us(100.00%,0.00%),0us(99.71%,0.00%),0params(0.00%,0.00%),0float_"
"ops(100.00%,0.00%),0|3\n\ninput_type:0:1,\t1:1\t(run*0|defined*1)\texec_"
"time:0us\ninput_type:0:2x2x6x12,\t1:2x2x6x12\t(run*0|defined*1)\texec_"
"time:0us\ninput_type:0:3x3x3x6,\t1:3x3x3x6\t(run*0|defined*1)\texec_"
"time:0us\n\nConst0B(0.00%,0.00%),2us(99.74%,0.04%),0us(100.00%,0.00%),"
"2us(99.71%,0.04%),0params(0.00%,0.00%),0float_ops(100.00%,0.00%),1|"
"10\n\ninput_type:\t(run*1|defined*10)\texec_time:2us\n\nConv2D27.90KB("
"91.60%,91.60%),4.89ms(99.70%,98.87%),404us(100.00%,100.00%),4.49ms(99."
"67%,98.77%),0params(0.00%,0.00%),10.44kfloat_ops(100.00%,100.00%),2|"
"2\n\ninput_type:0:2x3x3x6,\t1:2x2x6x12\t(run*1|defined*1)\texec_time:"
"597us\ninput_type:0:2x6x6x3,\t1:3x3x3x6\t(run*1|defined*1)\texec_time:4."
"29ms\n\nIdentity0B(0.00%,0.00%),0us(0.83%,0.00%),0us(0.00%,0.00%),0us(0."
"90%,0.00%),0params(0.00%,0.00%),0float_ops(0.00%,0.00%),0|3\n\ninput_"
"type:0:1\t(run*0|defined*1)\texec_time:0us\ninput_type:0:2x2x6x12\t(run*"
"0|defined*1)\texec_time:0us\ninput_type:0:3x3x3x6\t(run*0|defined*1)"
"\texec_time:0us\n\n",
StringReplace(CheckAndRemoveDoc(dump_str), " ", ""));
EXPECT_EQ(dump_str, TestToFromProto("op", opts, true));
}
}
} |
1,458 | cpp | tensorflow/tensorflow | tfprof_tensor | tensorflow/core/profiler/internal/tfprof_tensor.cc | tensorflow/core/profiler/internal/tfprof_tensor_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_TENSOR_H_
#define TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_TENSOR_H_
#include <memory>
#include <sstream>
#include <typeinfo>
#include <utility>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
class TFProfTensor {
public:
explicit TFProfTensor(std::unique_ptr<Tensor> tensor)
: tensor_(std::move(tensor)) {
Build();
}
void Display(string* formatted_str, TFProfTensorProto* tfprof_tensor_pb);
private:
const int64_t kTFProfTenosrMaxDisplayLen = 10000;
const int64_t kTFProfTensorMaxWarnLen = 100000;
void Build();
template <typename T>
bool AddValue(const T& value, TFProfTensorProto* dim) {
std::ostringstream sstream;
sstream << value;
if (typeid(value) == typeid(double)) {
double double_val = 0.0;
CHECK(absl::SimpleAtod(sstream.str(), &double_val));
dim->add_value_double(double_val);
absl::StrAppendFormat(&formatted_str_, "%.2f ",
dim->value_double(dim->value_double_size() - 1));
} else if (typeid(value) == typeid(int64_t)) {
int64_t int64_val = 0;
CHECK(absl::SimpleAtoi(sstream.str(), &int64_val));
dim->add_value_int64(int64_val);
absl::StrAppendFormat(&formatted_str_, "%d ",
dim->value_int64(dim->value_int64_size() - 1));
} else if (typeid(value) == typeid(string)) {
dim->add_value_str(sstream.str());
absl::StrAppend(&formatted_str_, "'",
dim->value_str(dim->value_str_size() - 1), "' ");
} else {
CHECK(false) << "Unsupported type: " << typeid(value).name();
}
    // Every supported branch appends exactly one value; the unsupported-type
    // branch aborts via CHECK(false) above, so reaching this point means
    // success.
    return true;
  }
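  // Recursively walks the flattened `values` vector according to the tensor's
  // dimensions, emitting one nested "[...]" group per dimension while also
  // mirroring each scalar into the proto. Returns the index one past the last
  // value consumed; the display string is truncated once it exceeds
  // kTFProfTenosrMaxDisplayLen.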
template <typename T>
int64_t BuildOutput(int64_t start, int depth, const std::vector<T>& values,
TFProfTensorProto* dim) {
formatted_str_ += "[";
int64_t nstart = start;
if (tensor_->dims() == 0 && values.size() == 1) {
std::ostringstream sstream;
sstream << values[nstart];
if (typeid(values[nstart]) == typeid(double)) {
double double_val = 0.0;
CHECK(absl::SimpleAtod(sstream.str(), &double_val));
dim->add_value_double(double_val);
absl::StrAppendFormat(&formatted_str_, "%.2f ",
dim->value_double(dim->value_double_size() - 1));
} else if (typeid(values[nstart]) == typeid(int64_t)) {
int64_t int64_val = 0;
CHECK(absl::SimpleAtoi(sstream.str(), &int64_val));
dim->add_value_int64(int64_val);
absl::StrAppendFormat(&formatted_str_, "%d ",
dim->value_int64(dim->value_int64_size() - 1));
} else if (typeid(values[nstart]) == typeid(string)) {
dim->add_value_str(sstream.str());
absl::StrAppend(&formatted_str_, "'",
dim->value_str(dim->value_str_size() - 1), "' ");
} else {
CHECK(false) << "Unsupported type: " << typeid(values[nstart]).name();
}
} else {
for (int i = 0; i < tensor_->dim_size(depth); i++) {
if (depth == tensor_->dims() - 1) {
std::ostringstream sstream;
sstream << values[nstart];
if (typeid(values[nstart]) == typeid(double)) {
double double_val = 0.0;
CHECK(absl::SimpleAtod(sstream.str(), &double_val));
dim->add_value_double(double_val);
absl::StrAppendFormat(
&formatted_str_, "%.2f ",
dim->value_double(dim->value_double_size() - 1));
} else if (typeid(values[nstart]) == typeid(int64_t)) {
int64_t int64_val = 0;
CHECK(absl::SimpleAtoi(sstream.str(), &int64_val));
dim->add_value_int64(int64_val);
absl::StrAppendFormat(
&formatted_str_, "%d ",
dim->value_int64(dim->value_int64_size() - 1));
} else if (typeid(values[nstart]) == typeid(string)) {
dim->add_value_str(sstream.str());
absl::StrAppend(&formatted_str_, "'",
dim->value_str(dim->value_str_size() - 1), "' ");
} else {
CHECK(false) << "Unsupported type: "
<< typeid(values[nstart]).name();
}
++nstart;
} else {
nstart = BuildOutput<T>(nstart, depth + 1, values, dim);
}
}
}
if (formatted_str_.length() > kTFProfTenosrMaxDisplayLen) {
formatted_str_ = formatted_str_.substr(0, kTFProfTenosrMaxDisplayLen);
}
formatted_str_ += "],\n";
return nstart;
}
template <typename T, typename U>
void GetValueVec(std::vector<U>* value_vec) {
if (tensor_->NumElements() > kTFProfTensorMaxWarnLen) {
absl::FPrintF(stderr, "Showing huge tensor, the tool might halt...\n");
}
auto values = tensor_->flat<T>();
for (int64_t i = 0; i < tensor_->NumElements(); i++) {
value_vec->push_back(static_cast<U>(values(i)));
}
}
TFProfTensorProto tfprof_tensor_pb_;
std::unique_ptr<Tensor> tensor_;
string formatted_str_;
};
}
}
#endif
#include "tensorflow/core/profiler/internal/tfprof_tensor.h"
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
namespace tensorflow {
namespace tfprof {
void TFProfTensor::Display(string* formatted_str,
TFProfTensorProto* tfprof_tensor_pb) {
if (formatted_str) {
if (formatted_str_.length() >= kTFProfTenosrMaxDisplayLen) {
*formatted_str =
absl::StrCat(formatted_str_, "...omitted from display\n\n");
} else {
*formatted_str = formatted_str_;
}
}
if (tfprof_tensor_pb) {
tfprof_tensor_pb->MergeFrom(tfprof_tensor_pb_);
}
}
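// Reads the tensor once and formats it by dtype: float/double are widened to
// double, int32/int64 to int64, and strings are kept as-is; any other dtype
// is reported as unsupported.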
void TFProfTensor::Build() {
tfprof_tensor_pb_.set_dtype(tensor_->dtype());
switch (tensor_->dtype()) {
case DataType::DT_FLOAT:
case DataType::DT_DOUBLE: {
std::vector<double> values_vec;
if (tensor_->dtype() == DataType::DT_FLOAT) {
GetValueVec<float, double>(&values_vec);
} else if (tensor_->dtype() == DataType::DT_DOUBLE) {
GetValueVec<double, double>(&values_vec);
}
BuildOutput<double>(0, 0, values_vec, &tfprof_tensor_pb_);
break;
}
case DataType::DT_INT32:
case DataType::DT_INT64: {
std::vector<int64_t> values_vec;
if (tensor_->dtype() == DataType::DT_INT32) {
GetValueVec<int32, int64_t>(&values_vec);
} else if (tensor_->dtype() == DataType::DT_INT64) {
GetValueVec<int64_t, int64_t>(&values_vec);
}
BuildOutput<int64_t>(0, 0, values_vec, &tfprof_tensor_pb_);
break;
}
case DataType::DT_STRING: {
std::vector<tstring> values_vec;
GetValueVec<tstring, tstring>(&values_vec);
BuildOutput<tstring>(0, 0, values_vec, &tfprof_tensor_pb_);
break;
}
default: {
absl::FPrintF(stderr, "Not Supported type %d\n", tensor_->dtype());
break;
}
}
}
}
} | #include <memory>
#include <utility>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_log.pb.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
class TFProfTensorTest : public ::testing::Test {
protected:
TFProfTensorTest() {
string graph_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/graph.pbtxt");
std::unique_ptr<tensorflow::GraphDef> graph_pb(new tensorflow::GraphDef());
TF_CHECK_OK(
ReadProtoFile(Env::Default(), graph_path, graph_pb.get(), false));
std::unique_ptr<tensorflow::RunMetadata> run_meta_pb;
std::unique_ptr<OpLogProto> op_log_pb;
string ckpt_path = io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/ckpt");
TF_Status* status = TF_NewStatus();
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader(
new checkpoint::CheckpointReader(ckpt_path, status));
CHECK(TF_GetCode(status) == TF_OK);
TF_DeleteStatus(status);
tf_stats_ =
std::make_unique<TFStats>(std::move(graph_pb), std::move(run_meta_pb),
std::move(op_log_pb), std::move(ckpt_reader));
tf_stats_->BuildAllViews();
}
std::unique_ptr<TFStats> tf_stats_;
};
TEST_F(TFProfTensorTest, Basics) {
Options opts(3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name", {"VariableV2"},
{".*"}, {""}, {".*"}, {""}, false,
{"tensor_value"},
"", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("scope", opts);
EXPECT_EQ(root.children(0).name(), "DW");
EXPECT_GT(root.children(0).tensor_value().value_double_size(), 10);
EXPECT_EQ(root.children(1).name(), "DW2");
EXPECT_GT(root.children(1).tensor_value().value_double_size(), 10);
EXPECT_EQ(root.children(2).name(), "ScalarW");
EXPECT_EQ(root.children(2).tensor_value().value_double_size(), 1);
}
}
} |
1,459 | cpp | tensorflow/tensorflow | xplane_to_op_stats | tensorflow/core/profiler/convert/xplane_to_op_stats.cc | tensorflow/core/profiler/convert/xplane_to_op_stats_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_OP_STATS_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_OP_STATS_H_
#include <vector>
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/utils/hlo_proto_map.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
struct OpStatsOptions {
bool maybe_drop_incomplete_steps = false;
bool generate_op_metrics_db = false;
bool generate_step_db = false;
bool generate_kernel_stats_db = false;
};
OpStats ConvertXSpaceToOpStats(const XSpace& space,
const OpStatsOptions& options);
void SetProgramIdToNameMap(const HloProtoMap& hlo_proto_map,
tensorflow::profiler::OpStats& op_stats);
void SetRunEnvironment(const XSpace& space, RunEnvironment* env);
void PropagateXSpaceDiagnosticsToOpStats(const XSpace& space,
OpStats* op_stats);
PerfEnv MakePerfEnv(double peak_tera_flops_per_second,
std::vector<double> peak_bws);
PerfEnv GetPerfEnvFromXPlane(const XPlane& device_plane);
}
}
#endif
#include "tensorflow/core/profiler/convert/xplane_to_op_stats.h"
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/profiler/convert/op_metrics_db_combiner.h"
#include "tensorflow/core/profiler/convert/step_events_to_steps_db.h"
#include "tensorflow/core/profiler/convert/xplane_to_kernel_stats_db.h"
#include "tensorflow/core/profiler/convert/xplane_to_op_metrics_db.h"
#include "tensorflow/core/profiler/convert/xplane_to_step_events.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/hardware_types.pb.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_function.pb.h"
#include "tensorflow/core/profiler/utils/device_caps_utils.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/hardware_type_utils.h"
#include "tensorflow/core/profiler/utils/hlo_proto_map.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
#include "tsl/profiler/utils/tpu_xplane_utils.h"
#include "tsl/profiler/utils/xplane_schema.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::FindTensorCorePlanes;
std::string Hostname(const XSpace& space) {
if (space.hostnames().empty()) return "localhost";
DCHECK_EQ(space.hostnames_size(), 1);
const std::string& hostname = space.hostnames(0);
return hostname;
}
}
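// Assembles the roofline parameters: peak compute in TFLOP/s and per-memory
// peak bandwidths in GB/s. The ridge point is peak GFLOP/s divided by the HBM
// read/write bandwidth, i.e. the operational intensity (FLOPs per byte) above
// which a kernel becomes compute-bound.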
PerfEnv MakePerfEnv(double peak_tera_flops_per_second,
std::vector<double> peak_bws) {
PerfEnv result;
result.set_peak_tera_flops_per_second(peak_tera_flops_per_second);
for (const auto bw : peak_bws) {
result.add_peak_bws_giga_bytes_per_second(bw);
}
result.set_ridge_point(tsl::profiler::TeraToGiga(peak_tera_flops_per_second) /
peak_bws[MemBwType::MEM_BW_TYPE_HBM_RW]);
return result;
}
PerfEnv GetPerfEnvFromXPlane(const XPlane& device_plane) {
DeviceCapabilities cap = GetDeviceCaps(device_plane);
if (!absl::StartsWith(device_plane.name(), kTpuPlanePrefix)) {
return MakePerfEnv(
tsl::profiler::GigaToTera(GetFlopMaxThroughputPerSM(cap)) *
cap.num_cores(),
{tsl::profiler::UniToGiga(cap.memory_bandwidth()),
tsl::profiler::UniToGiga(cap.memory_bandwidth()),
tsl::profiler::UniToGiga(cap.memory_bandwidth()),
tsl::profiler::UniToGiga(cap.memory_bandwidth())});
} else {
XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(&device_plane);
auto peak_tera_flops_per_second =
visitor.GetStat(StatType::kDevCapPeakTeraflopsPerSecond);
auto peak_tera_flops_per_second_val =
peak_tera_flops_per_second.has_value()
? peak_tera_flops_per_second->DoubleValue()
: 0.0;
auto peak_hbm_bw_giga_bytes_per_second =
visitor.GetStat(StatType::kDevCapPeakHbmBwGigabytesPerSecond);
auto peak_hbm_bw_giga_bytes_per_second_val =
peak_hbm_bw_giga_bytes_per_second.has_value()
? peak_hbm_bw_giga_bytes_per_second->DoubleValue()
: 0.0;
auto peak_sram_rd_bw_giga_bytes_per_second =
visitor.GetStat(StatType::kDevCapPeakSramRdBwGigabytesPerSecond);
auto peak_sram_rd_bw_giga_bytes_per_second_val =
peak_sram_rd_bw_giga_bytes_per_second.has_value()
? peak_sram_rd_bw_giga_bytes_per_second->DoubleValue()
: 0.0;
auto peak_sram_wr_bw_giga_bytes_per_second =
visitor.GetStat(StatType::kDevCapPeakSramWrBwGigabytesPerSecond);
auto peak_sram_wr_bw_giga_bytes_per_second_val =
peak_sram_wr_bw_giga_bytes_per_second.has_value()
? peak_sram_wr_bw_giga_bytes_per_second->DoubleValue()
: 0.0;
return MakePerfEnv(peak_tera_flops_per_second_val,
{peak_hbm_bw_giga_bytes_per_second_val,
peak_sram_rd_bw_giga_bytes_per_second_val,
peak_sram_wr_bw_giga_bytes_per_second_val});
}
}
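// Device type is inferred in priority order: GPU planes (model name from the
// device capabilities, falling back to "GPU"), then TPU TensorCore planes
// (device-type stat), otherwise CPU with a device core count of zero.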
void SetRunEnvironment(const XSpace& space, RunEnvironment* env) {
env->set_host_count(1);
env->set_task_count(1);
env->mutable_hostnames()->insert({Hostname(space), true});
std::vector<const XPlane*> gpu_planes =
FindPlanesWithPrefix(space, kGpuPlanePrefix);
if (!gpu_planes.empty()) {
absl::string_view gpu_model =
GpuModelName(GetDeviceCaps(*gpu_planes.front()));
if (!gpu_model.empty()) {
env->set_device_type(std::string(gpu_model));
} else {
env->set_device_type("GPU");
}
env->set_device_core_count(gpu_planes.size());
} else if (std::vector<const XPlane*> tpu_planes =
FindTensorCorePlanes(space);
!tpu_planes.empty()) {
XPlaneVisitor visitor =
tsl::profiler::CreateTfXPlaneVisitor(tpu_planes.at(0));
auto xstat = visitor.GetStat(StatType::kDeviceTypeString);
if (xstat.has_value()) {
env->set_device_type(std::string(xstat->StrOrRefValue()));
}
env->set_device_core_count(tpu_planes.size());
} else {
env->set_device_type("CPU");
env->set_device_core_count(0);
}
}
void PropagateXSpaceDiagnosticsToOpStats(const XSpace& space,
OpStats* op_stats) {
if (!space.errors().empty()) {
absl::flat_hash_set<std::string> unique_errors;
unique_errors.insert(space.errors().begin(), space.errors().end());
*op_stats->mutable_diagnostics()->mutable_errors() = {unique_errors.begin(),
unique_errors.end()};
}
if (!space.warnings().empty()) {
absl::flat_hash_set<std::string> unique_warnings;
unique_warnings.insert(space.warnings().begin(), space.warnings().end());
*op_stats->mutable_diagnostics()->mutable_warnings() = {
unique_warnings.begin(), unique_warnings.end()};
}
}
void SetProgramIdToNameMap(const HloProtoMap& hlo_proto_map,
tensorflow::profiler::OpStats& op_stats) {
auto& program_id_to_name_map = *op_stats.mutable_program_id_to_name_map();
for (const auto& [program_id, hlo_proto] : hlo_proto_map) {
program_id_to_name_map[program_id] = hlo_proto->hlo_module().name();
}
}
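// Top-level conversion: collects device planes (TPU TensorCore planes when
// present, otherwise GPU planes), then, per the options, builds the device op
// metrics DB, step DB, and kernel stats DB, and merges host-thread metrics.
// On GPU, step events are flattened into non-overlapping events before the
// step DB and precision stats are computed; TPU step events are intersected
// across cores instead.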
OpStats ConvertXSpaceToOpStats(const XSpace& space,
const OpStatsOptions& options) {
std::vector<const XPlane*> device_planes = FindTensorCorePlanes(space);
bool is_tpu = !device_planes.empty();
if (!is_tpu) {
device_planes = FindPlanesWithPrefix(space, kGpuPlanePrefix);
}
OpStats op_stats;
StepEvents step_events;
PropagateXSpaceDiagnosticsToOpStats(space, &op_stats);
OpMetricsDbCombiner op_metrics_db_combiner(
op_stats.mutable_device_op_metrics_db());
SetRunEnvironment(space, op_stats.mutable_run_environment());
KernelReportMap reports;
for (const XPlane* device_trace : device_planes) {
XPlane aggregated_xplane;
bool use_aggregated_xplane = false;
if (options.generate_op_metrics_db) {
if (!op_stats.has_perf_env()) {
*op_stats.mutable_perf_env() = GetPerfEnvFromXPlane(*device_trace);
}
if (!is_tpu) {
OpMetricsDb device_op_metrics_db =
ConvertDeviceTraceXPlaneToOpMetricsDb(*device_trace);
op_metrics_db_combiner.Combine(device_op_metrics_db);
} else {
AggregateXPlane(*device_trace, aggregated_xplane);
use_aggregated_xplane = true;
OpMetricsDb device_op_metrics_db =
ConvertTpuDeviceTraceXPlaneToOpMetricsDb(aggregated_xplane);
op_metrics_db_combiner.Combine(device_op_metrics_db);
}
}
if (options.generate_step_db) {
StepEvents device_step_events = ConvertDeviceTraceXPlaneToStepEvents(
use_aggregated_xplane ? aggregated_xplane : *device_trace);
if (is_tpu) {
IntersectCombineStepEvents(device_step_events, &step_events);
} else {
UnionCombineStepEvents(device_step_events, &step_events);
}
}
if (options.generate_kernel_stats_db) {
ConvertDeviceTraceXPlaneToKernelReports(*device_trace,
{}, &reports);
}
}
if (options.generate_kernel_stats_db) {
CopyTopKDurationKernelReportsToDb(reports,
op_stats.mutable_kernel_stats_db());
}
bool has_device = !device_planes.empty();
const XPlane* host_plane = FindPlaneWithName(space, kHostThreadsPlaneName);
if (host_plane) {
if (options.generate_op_metrics_db) {
*op_stats.mutable_host_op_metrics_db() =
ConvertHostThreadsXPlaneToOpMetricsDb(*host_plane);
}
if (options.generate_step_db && !has_device) {
StepEvents host_step_events =
ConvertHostThreadsXPlaneToStepEvents(*host_plane, nullptr);
UnionCombineStepEvents(host_step_events, &step_events);
}
XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(host_plane);
auto stat = visitor.GetStat(StatType::kMatrixUnitUtilizationPercent);
if (stat.has_value()) {
op_stats.mutable_performance_counter_result()
->set_matrix_unit_utilization_percent(stat->DoubleValue());
}
}
if (options.generate_step_db) {
if (is_tpu) {
*op_stats.mutable_step_db() = ConvertStepEventsToStepDb(
has_device, options.maybe_drop_incomplete_steps, step_events);
*op_stats.mutable_device_op_metrics_db()->mutable_precision_stats() =
ComputePrecisionStats(step_events);
} else {
StepEvents nonoverlapped_step_events =
ToNonOverlappedStepEvents(step_events);
*op_stats.mutable_step_db() = ConvertStepEventsToStepDb(
has_device, options.maybe_drop_incomplete_steps,
nonoverlapped_step_events);
*op_stats.mutable_device_op_metrics_db()->mutable_precision_stats() =
ComputePrecisionStats(nonoverlapped_step_events);
}
}
if (!is_tpu) {
CoreDetails& details =
(*op_stats.mutable_core_id_to_details())[kDefaultGpuLocalCoreId];
details.set_hostname(Hostname(space));
}
HloProtoMap hlo_proto_map;
hlo_proto_map.AddHloProtosFromXSpace(space);
SetProgramIdToNameMap(hlo_proto_map, op_stats);
return op_stats;
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_op_stats.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/convert/multi_xplanes_to_op_stats.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/convert/step_events_to_steps_db.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_function.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
#include "tsl/profiler/utils/group_events.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::Property;
using ::testing::UnorderedElementsAre;
TEST(ConvertXPlaneToOpStats, GpuPerfEnv) {
auto space = std::make_unique<XSpace>();
constexpr double kMaxError = 0.01;
constexpr int kClockRateKHz = 1530000;
constexpr int kCoreCount = 80;
constexpr uint64 kMemoryBandwidthBytesPerSecond =
uint64{900} * 1000 * 1000 * 1000;
constexpr int kComputeCapMajor = 7;
constexpr int kComputeCapMinor = 0;
XPlaneBuilder device_plane(
GetOrCreateGpuXPlane(space.get(), 0));
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kDevVendor)),
kDeviceVendorNvidia);
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("clock_rate"),
kClockRateKHz);
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("core_count"),
kCoreCount);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("memory_bandwidth"),
kMemoryBandwidthBytesPerSecond);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("compute_cap_major"),
kComputeCapMajor);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("compute_cap_minor"),
kComputeCapMinor);
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStatsOptions options;
options.generate_op_metrics_db = true;
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
const PerfEnv& perf_env = op_stats.perf_env();
EXPECT_NEAR(141, perf_env.peak_tera_flops_per_second(), kMaxError);
EXPECT_NEAR(
900,
perf_env.peak_bws_giga_bytes_per_second(MemBwType::MEM_BW_TYPE_HBM_RW),
kMaxError);
EXPECT_NEAR(156.67, perf_env.ridge_point(), kMaxError);
}
TEST(ConvertXPlaneToOpStats, GpuRunEnvironment) {
auto space = std::make_unique<XSpace>();
XPlaneBuilder device_plane1(
GetOrCreateGpuXPlane(space.get(), 0));
device_plane1.AddStatValue(*device_plane1.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kDevVendor)),
kDeviceVendorNvidia);
XPlaneBuilder device_plane2(
GetOrCreateGpuXPlane(space.get(), 1));
device_plane2.AddStatValue(*device_plane2.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kDevVendor)),
kDeviceVendorNvidia);
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(
session_snapshot_or.value(), OpStatsOptions(), &op_stats));
const RunEnvironment& run_env = op_stats.run_environment();
EXPECT_EQ("Nvidia GPU", run_env.device_type());
EXPECT_EQ(1, run_env.host_count());
EXPECT_EQ(1, run_env.task_count());
EXPECT_EQ(2, run_env.device_core_count());
}
TEST(ConvertXPlaneToOpStats, CpuOnlyStepDbTest) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 0;
auto space = std::make_unique<XSpace>();
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(space.get()));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 70);
OpStatsOptions options;
options.generate_op_metrics_db = true;
options.generate_step_db = true;
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
const StepDatabaseResult& step_db = op_stats.step_db();
EXPECT_EQ(step_db.step_sequence_size(), 1);
}
TEST(ConvertXPlaneToOpStats, GpuStepDbTest) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 0;
constexpr int64_t kCorrelationId = 100;
auto space = std::make_unique<XSpace>();
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(space.get()));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 20,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 10,
{{StatType::kCorrelationId, kCorrelationId}});
XPlaneBuilder device_plane_builder(
GetOrCreateGpuXPlane(space.get(), 0));
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 50, 40,
{{StatType::kCorrelationId, kCorrelationId}});
OpStatsOptions options;
options.generate_op_metrics_db = true;
options.generate_step_db = true;
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
const StepDatabaseResult& step_db = op_stats.step_db();
EXPECT_EQ(step_db.step_sequence_size(), 0);
PrecisionStats precision_stats =
op_stats.device_op_metrics_db().precision_stats();
EXPECT_EQ(precision_stats.compute_16bit_ps(), 0);
EXPECT_EQ(precision_stats.compute_32bit_ps(), 40);
}
TEST(ConvertXPlaneToOpStats, PropagateAndDedupErrors) {
XSpace space;
static constexpr char kError[] = "host: error";
*space.add_errors() = kError;
*space.add_errors() = kError;
OpStats op_stats = ConvertXSpaceToOpStats(space, OpStatsOptions());
EXPECT_EQ(1, op_stats.diagnostics().errors_size());
EXPECT_EQ(kError, op_stats.diagnostics().errors(0));
}
TEST(ConvertXPlaneToOpStats, Hostnames) {
XSpace space;
static constexpr char kHost[] = "host1";
*space.add_hostnames() = kHost;
OpStats op_stats = ConvertXSpaceToOpStats(space, OpStatsOptions());
EXPECT_EQ(
kHost,
op_stats.core_id_to_details().at(kDefaultGpuLocalCoreId).hostname());
}
void BuildXSpaceForTest(XSpace& xspace, absl::string_view hostname) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 456;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&xspace));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kStepId}});
auto executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &executor_thread, "aaa:bbb", 30, 70);
xspace.add_hostnames(std::string(hostname));
}
TEST(ConvertXPlaneToOpStats, TestConvertMultiXSpacesToCombinedOpStats) {
static constexpr char kHost1[] = "host1";
static constexpr char kHost2[] = "host2";
auto xspace1 = std::make_unique<XSpace>();
auto xspace2 = std::make_unique<XSpace>();
BuildXSpaceForTest(*xspace1, kHost1);
BuildXSpaceForTest(*xspace2, kHost2);
std::vector<std::string> xspace_paths;
xspace_paths.push_back("host1.pb");
xspace_paths.push_back("host2.pb");
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(xspace1));
xspaces.push_back(std::move(xspace2));
auto session_snapshot_or =
SessionSnapshot::Create(std::move(xspace_paths), std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStatsOptions options;
options.generate_op_metrics_db = true;
options.generate_step_db = true;
OpStats combined_op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &combined_op_stats))
<< "Failed to convert multi XSpace to OpStats";
ASSERT_EQ(combined_op_stats.host_op_metrics_db().metrics_db_size(), 2);
const auto& metric = combined_op_stats.host_op_metrics_db().metrics_db(1);
EXPECT_EQ(metric.name(), "aaa");
EXPECT_EQ(metric.category(), "bbb");
EXPECT_EQ(metric.self_time_ps(), 140);
ASSERT_EQ(combined_op_stats.step_db().step_sequence_size(), 1);
ASSERT_EQ(
combined_op_stats.step_db().step_sequence(0).step_info_per_core_size(),
2);
const auto& step_info_per_core =
combined_op_stats.step_db().step_sequence(0).step_info_per_core();
EXPECT_TRUE(step_info_per_core.contains(kDefaultGpuLocalCoreId));
EXPECT_TRUE(step_info_per_core.contains(1000 + kDefaultGpuLocalCoreId));
const auto& core_details_map = combined_op_stats.core_id_to_details();
EXPECT_EQ(kHost1, core_details_map.at(kDefaultGpuLocalCoreId).hostname());
EXPECT_EQ(kHost2,
core_details_map.at(1000 + kDefaultGpuLocalCoreId).hostname());
}
TEST(ConvertXPlaneToOpStats, RunEnvironmentExtractedFromTpuPlane) {
XSpace xspace;
for (int i : {0, 1, 2, 3}) {
GetOrCreateTpuXPlane(&xspace, i, "TPU V4", 0, 0);
}
OpStats op_stats = ConvertXSpaceToOpStats(xspace, OpStatsOptions());
EXPECT_EQ(op_stats.run_environment().device_type(), "TPU V4");
EXPECT_EQ(op_stats.run_environment().device_core_count(), 4);
}
TEST(ConvertXPlaneToOpStats, TpuPerfEnv) {
auto space = std::make_unique<XSpace>();
constexpr double kMaxError = 0.01;
constexpr int kClockRateKHz = 1530000;
constexpr int kCoreCount = 80;
constexpr uint64 kMemoryBandwidthBytesPerSecond =
uint64{900} * 1000 * 1000 * 1000;
constexpr int kComputeCapMajor = 7;
constexpr int kComputeCapMinor = 0;
constexpr double kDevCapPeakTeraflopsPerSecond = 141.0;
constexpr double kDevCapPeakHbmBwGigabytesPerSecond = 900.0;
XPlaneBuilder device_plane(GetOrCreateTpuXPlane(
space.get(), 0, "TPU V4",
kDevCapPeakTeraflopsPerSecond, kDevCapPeakHbmBwGigabytesPerSecond));
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("clock_rate"),
kClockRateKHz);
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("core_count"),
kCoreCount);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("memory_bandwidth"),
kMemoryBandwidthBytesPerSecond);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("compute_cap_major"),
kComputeCapMajor);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("compute_cap_minor"),
kComputeCapMinor);
OpStatsOptions options;
options.generate_op_metrics_db = true;
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
const PerfEnv& perf_env = op_stats.perf_env();
EXPECT_NEAR(141, perf_env.peak_tera_flops_per_second(), kMaxError);
EXPECT_NEAR(
900,
perf_env.peak_bws_giga_bytes_per_second(MemBwType::MEM_BW_TYPE_HBM_RW),
kMaxError);
EXPECT_NEAR(156.67, perf_env.ridge_point(), kMaxError);
}
TEST(ConvertXPlaneToOpStats, TpuRunEnvironment) {
auto space = std::make_unique<XSpace>();
XPlaneBuilder device_plane1(
GetOrCreateTpuXPlane(space.get(), 0, "TPU V4", 0, 0));
XPlaneBuilder device_plane2(
GetOrCreateTpuXPlane(space.get(), 1, "TPU V4", 0, 0));
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(
session_snapshot_or.value(), OpStatsOptions(), &op_stats));
const RunEnvironment& run_env = op_stats.run_environment();
EXPECT_EQ("TPU V4", run_env.device_type());
EXPECT_EQ(1, run_env.host_count());
EXPECT_EQ(1, run_env.task_count());
EXPECT_EQ(2, run_env.device_core_count());
}
TEST(ConvertXPlaneToOpStats, TpuDeviceTraceToStepDb) {
auto space = std::make_unique<XSpace>();
constexpr double kDevCapPeakTeraflopsPerSecond = 141.0;
constexpr double kDevCapPeakHbmBwGigabytesPerSecond = 1000.0;
XPlaneBuilder xplane_builder(GetOrCreateTpuXPlane(
space.get(), 0, "TPU V4",
kDevCapPeakTeraflopsPerSecond, kDevCapPeakHbmBwGigabytesPerSecond));
XEventMetadata* event_metadata = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata->set_name("op_name");
XStatsBuilder<XEventMetadata> stats(event_metadata, &xplane_builder);
stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kProgramId)),
1);
stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kSymbolId)),
1);
stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kSelfDurationPs)),
10);
stats.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
"tf_op_name");
stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kHloCategory)),
"category");
XLineBuilder line = xplane_builder.GetOrCreateLine(1);
line.SetName(kTensorFlowOpLineName);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetOffsetNs(0);
event.SetDurationNs(10);
OpStatsOptions options;
options.generate_op_metrics_db = true;
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
EXPECT_THAT(op_stats.device_op_metrics_db().metrics_db(),
UnorderedElementsAre(Property(&OpMetrics::name, "op_name"),
Property(&OpMetrics::name, "IDLE")));
}
TEST(ConvertXPlaneToOpStats, TpuMultiDeviceStepDbTest) {
auto space = std::make_unique<XSpace>();
XPlaneBuilder device_plane_builder1(
GetOrCreateTpuXPlane(space.get(), 0, "TPU V4", 0, 0));
XPlaneBuilder device_plane_builder2(
GetOrCreateTpuXPlane(space.get(), 1, "TPU V4", 0, 0));
device_plane_builder1.ReserveLines(1);
device_plane_builder2.ReserveLines(1);
XStatMetadata* kGroupId1 = device_plane_builder1.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
XLineBuilder line = device_plane_builder1.GetOrCreateLine(1);
line.SetName(kXlaOpLineName);
XEventMetadata* event_metadata =
device_plane_builder1.GetOrCreateEventMetadata(1);
event_metadata->set_name("Step 1");
XEventBuilder event_builder = line.AddEvent(*event_metadata);
event_builder.AddStatValue(*kGroupId1, 1);
event_builder.SetDurationNs(100);
event_builder.SetOffsetNs(100);
line = device_plane_builder2.GetOrCreateLine(1);
line.SetName(kXlaOpLineName);
XStatMetadata* kGroupId2 = device_plane_builder2.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
XEventMetadata* event_metadata2 =
device_plane_builder2.GetOrCreateEventMetadata(2);
event_metadata2->set_name("Step 1");
XEventBuilder event_builder2 = line.AddEvent(*event_metadata2);
event_builder2.AddStatValue(*kGroupId2, 1);
event_builder2.SetDurationNs(100);
event_builder2.SetOffsetNs(300);
XStatMetadata* kGroupId3 = device_plane_builder2.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
XEventMetadata* event_metadata3 =
device_plane_builder2.GetOrCreateEventMetadata(2);
event_metadata3->set_name("Step 2");
XEventBuilder event_builder3 = line.AddEvent(*event_metadata3);
event_builder3.AddStatValue(*kGroupId3, 2);
event_builder3.SetDurationNs(100);
event_builder3.SetOffsetNs(300);
OpStatsOptions options;
options.generate_op_metrics_db = true;
options.generate_step_db = true;
OpStats op_stats = ConvertXSpaceToOpStats(*space, options);
const StepDatabaseResult& step_db = op_stats.step_db();
EXPECT_EQ(step_db.step_sequence_size(), 1);
}
}
}
} |
1,460 | cpp | tensorflow/tensorflow | hlo_proto_to_graph_view | tensorflow/core/profiler/convert/hlo_proto_to_graph_view.cc | tensorflow/core/profiler/convert/hlo_proto_to_graph_view_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_HLO_PROTO_TO_GRAPH_VIEW_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_HLO_PROTO_TO_GRAPH_VIEW_H_
#include <string>
#include <string_view>
#include <vector>
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_graph_dumper.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/tool_options.h"
namespace tensorflow {
namespace profiler {
struct GraphViewerParams {
std::string type;
std::string node_name;
int graph_width;
xla::HloRenderOptions render_options;
xla::RenderedGraphFormat format;
bool verbose;
bool show_metadata;
};
absl::StatusOr<GraphViewerParams> ParseGraphViewerParams(
const ToolOptions& options);
xla::RenderedGraphFormat GetRenderFormat(const std::string& format_string);
absl::StatusOr<std::string> ConvertHloProtoToGraph(
const xla::HloProto& hlo_proto, const std::string& node_name,
int graph_width, const xla::HloRenderOptions& render_options,
const xla::RenderedGraphFormat& format);
absl::StatusOr<std::string> RenderGraphView(
const xla::HloComputation& computation, absl::string_view label,
const xla::DebugOptions& debug_options, xla::RenderedGraphFormat format,
xla::HloRenderOptions hlo_render_options = {});
absl::StatusOr<std::string> RenderGraphNeighborhoodAround(
const xla::HloInstruction& node, int radius,
xla::RenderedGraphFormat format,
xla::HloRenderOptions hlo_render_options = {},
const absl::flat_hash_set<const xla::HloInstruction*>& boundary = {});
absl::StatusOr<std::string> ConvertHloProtoToStringView(
const xla::HloProto& hlo_proto, bool verbose, bool metadata);
absl::StatusOr<std::string> WrapDotInFormat(std::string dot,
xla::RenderedGraphFormat format);
std::string WrapDotInHtml(std::string dot);
void RegisterGraphvizURLRenderer(
std::function<absl::StatusOr<std::string>(absl::string_view dot)> renderer);
}
}
#endif
#include "tensorflow/core/profiler/convert/hlo_proto_to_graph_view.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_graph_dumper.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/tool_options.h"
#include "tensorflow/core/profiler/utils/hlo_module_utils.h"
#include "tensorflow/core/profiler/utils/hlo_proto_to_module.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::tensorflow::StatusOr;
using ::tensorflow::errors::InvalidArgument;
using ::xla::HloComputation;
using ::xla::HloInstruction;
using ::xla::HloModule;
using ::xla::HloPrintOptions;
using ::xla::HloProto;
using ::xla::HloRenderOptions;
using ::xla::RenderedGraphFormat;
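// Blanks out infeed and outfeed configs so the config strings are not emitted
// into the rendered Graphviz output.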
void CleanUpHloModuleForGraphviz(HloModule* hlo_module) {
for (HloComputation* computation : hlo_module->computations()) {
for (HloInstruction* inst : computation->instructions()) {
if (inst->opcode() == xla::HloOpcode::kInfeed) {
inst->set_infeed_config("");
} else if (inst->opcode() == xla::HloOpcode::kOutfeed) {
inst->set_outfeed_config("");
}
}
}
}
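// Renders the graph for node_name: the whole computation if the name resolves
// to an HloComputation, otherwise the neighborhood within graph_width hops of
// the matching HloInstruction.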
absl::StatusOr<std::string> Plot(std::unique_ptr<HloModule> module,
const std::string& node_name, int graph_width,
const HloRenderOptions& render_options,
const RenderedGraphFormat& format) {
if (node_name.empty()) {
return InvalidArgument("node_name should not be empty");
}
const HloInstruction* instr = FindInstruction(*module, node_name);
const HloComputation* comp = FindComputation(*module, node_name);
if (!instr && !comp) {
return InvalidArgument(
absl::StrCat("Couldn't find HloInstruction or HloComputation named ",
node_name, "."));
}
absl::StatusOr<std::string> graph_handle;
CleanUpHloModuleForGraphviz(module.get());
if (comp) {
graph_handle =
RenderGraphView(*comp, "", comp->parent()->config().debug_options(),
format, render_options);
} else {
graph_handle = RenderGraphNeighborhoodAround(*instr, graph_width, format,
render_options);
}
if (graph_handle.ok()) {
VLOG(1) << graph_handle.value();
} else {
LOG(ERROR) << "Unable to render graph: " << graph_handle.status();
}
return graph_handle;
}
static constexpr char kGraphTypeName[] = "graph";
static constexpr char kShortTxtTypeName[] = "short_txt";
static constexpr char kLongTxtTypeName[] = "long_txt";
static constexpr char kDefaultFormatString[] = "url";
static constexpr int kDefaultWidth = 3;
static constexpr int kDefaultShowMetadata = 0;
static constexpr int kDefaultMergeFusion = 0;
}
absl::StatusOr<GraphViewerParams> ParseGraphViewerParams(
const ToolOptions& options) {
GraphViewerParams params;
std::optional<std::string> type = GetParam<std::string>(options, "type");
if (!type.has_value()) {
return errors::InvalidArgument("Graph viewer must provide a type option.");
}
if (type == kGraphTypeName) {
params.type = type.value();
if (std::optional<std::string> node_name =
GetParam<std::string>(options, "node_name")) {
params.node_name = node_name.value();
}
params.graph_width =
GetParamWithDefault<int>(options, "graph_width", kDefaultWidth);
params.render_options.show_backend_config = GetParamWithDefault<int>(
options, "show_metadata", kDefaultShowMetadata);
params.render_options.show_fusion_subcomputations =
!GetParamWithDefault<int>(options, "merge_fusion", kDefaultMergeFusion);
params.format = GetRenderFormat(GetParamWithDefault<std::string>(
options, "format", kDefaultFormatString));
return params;
}
if (type == kShortTxtTypeName || type == kLongTxtTypeName) {
params.type = type.value();
params.verbose = (type == kLongTxtTypeName);
params.show_metadata =
GetParamWithDefault(options, "show_metadata", kDefaultShowMetadata);
return params;
}
return errors::InvalidArgument("Unknown graph viewer type option: ",
type.value());
}
xla::RenderedGraphFormat GetRenderFormat(const std::string& format_string) {
if (format_string == "html") {
return xla::RenderedGraphFormat::kHtml;
} else if (format_string == "dot") {
return xla::RenderedGraphFormat::kDot;
} else if (format_string == "url") {
return xla::RenderedGraphFormat::kUrl;
} else {
LOG(ERROR) << "Invalid graph format argument: " << format_string
<< ", fallback to default url";
return xla::RenderedGraphFormat::kUrl;
}
}
absl::StatusOr<std::string> ConvertHloProtoToGraph(
const HloProto& hlo_proto, const std::string& node_name, int graph_width,
const HloRenderOptions& render_options, const RenderedGraphFormat& format) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> hlo_module,
ConvertHloProtoToModule(hlo_proto));
return Plot(std::move(hlo_module), node_name, graph_width, render_options,
format);
}
absl::StatusOr<std::string> ConvertHloProtoToStringView(
const HloProto& hlo_proto, bool verbose, bool metadata) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> hlo_module,
ConvertHloProtoToModule(hlo_proto));
HloPrintOptions options;
if (!verbose) {
options = HloPrintOptions::ShortParsable();
}
options.set_print_large_constants(verbose);
options.set_print_metadata(metadata);
return hlo_module->ToString(options);
}
std::function<absl::StatusOr<std::string>(absl::string_view)>* url_renderer =
nullptr;
absl::Status CheckPrecondition(xla::RenderedGraphFormat format) {
if (format == xla::RenderedGraphFormat::kUrl && url_renderer == nullptr) {
return absl::FailedPreconditionError(
"Can't render as URL; no URL renderer was registered.");
}
return absl::OkStatus();
}
absl::StatusOr<std::string> RenderGraphView(
const xla::HloComputation& computation, absl::string_view label,
const xla::DebugOptions& debug_options, xla::RenderedGraphFormat format,
xla::HloRenderOptions hlo_render_options) {
auto precheck_status = CheckPrecondition(format);
if (!precheck_status.ok()) {
return precheck_status;
}
auto rendered_dot =
xla::RenderGraph(computation, label, debug_options,
RenderedGraphFormat::kDot, hlo_render_options);
if (!rendered_dot.ok()) {
return rendered_dot.status();
}
return WrapDotInFormat(rendered_dot.value(), format);
}
absl::StatusOr<std::string> RenderGraphNeighborhoodAround(
const xla::HloInstruction& node, int radius,
xla::RenderedGraphFormat format, xla::HloRenderOptions hlo_render_options,
const absl::flat_hash_set<const xla::HloInstruction*>& boundary) {
auto precheck_status = CheckPrecondition(format);
if (!precheck_status.ok()) {
return precheck_status;
}
auto rendered_dot = xla::RenderNeighborhoodAround(
node, radius, RenderedGraphFormat::kDot, hlo_render_options, boundary);
if (!rendered_dot.ok()) {
return rendered_dot.status();
}
return WrapDotInFormat(rendered_dot.value(), format);
}
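// Post-processes rendered DOT according to the requested format: raw DOT, a
// standalone HTML page, or a URL produced by the registered renderer.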
absl::StatusOr<std::string> WrapDotInFormat(std::string dot,
xla::RenderedGraphFormat format) {
switch (format) {
case xla::RenderedGraphFormat::kUrl:
if (url_renderer == nullptr) {
return absl::InternalError("url_renderer is null");
}
return (*url_renderer)(dot);
case xla::RenderedGraphFormat::kHtml:
return WrapDotInHtml(dot);
case xla::RenderedGraphFormat::kDot:
return std::string(dot);
}
}
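// Embeds the DOT source in a self-contained HTML page that renders it
// client-side with @hpcc-js/wasm Graphviz, adds svg-pan-zoom controls, and
// offers links to save the page, the SVG, and the DOT text.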
std::string WrapDotInHtml(std::string dot) {
return absl::StrReplaceAll(R"html(
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style type="text/css">
body {
height: 100vh;
margin: 0;
}
#graph-container {height:95vh;width:100%;padding:10px;display:block;}
#graph-container svg { height: 100% !important; width: 100% !important;}
.node, .cluster {cursor:pointer;}
.cluster:hover, .node:hover {outline: solid 3px black;}
</style>
</head>
<body>
<script src="https:
integrity="sha384-LigJPbR3TOfU/Xbb+PjiN1dGJYPweLk7kiGnaMgmxnUmKWaCFKbb5tH6iLlyVhPZ"
crossorigin="anonymous"></script>
<script src="https:
<div id="graph-container"></div>
<script>
const cssregex = new RegExp('stylesheet=<([^]*)\n>\n', 'gm');
const hpccWasm = window["@hpcc-js/wasm"];
const data = `$DOT`;
const results = cssregex.exec(data);
let dot_data = data;
let css_data = '';
if (results !== null) {
css_data = results[1].replace(/\s*data:.*\s*,/,'');
css_data = unescape(css_data);
dot_data = data.replace(cssregex, '');
}
var render_start = performance.now()
function add_controls(svg) {
var htmlblob = new Blob([document.documentElement.innerHTML],
{type: 'text/html'});
var savehtml = document.createElement('a');
savehtml.setAttribute('href', URL.createObjectURL(htmlblob));
savehtml.setAttribute('download', 'graph.html');
savehtml.innerHTML = " [Save HTML+SVG] ";
document.body.append(savehtml);
var svgblob = new Blob([svg.outerHTML], {type: 'image/svg'});
var savesvg = document.createElement('a');
savesvg.setAttribute('href', URL.createObjectURL(svgblob));
savesvg.setAttribute('download', 'graph.svg');
savesvg.innerHTML = " [Save SVG] ";
document.body.append(savesvg);
var dotblob = new Blob([data], {type: 'text/dot'});
var savedot = document.createElement('a');
savedot.setAttribute('href', URL.createObjectURL(dotblob));
savedot.setAttribute('download', 'graph.dot');
savedot.innerHTML = " [Save DOT] ";
document.body.append(savedot);
var render_end = performance.now();
var render_note = document.createElement('div')
render_note.innerHTML = 'Rendering took '
+ (render_end - render_start).toFixed(2) + "ms."
document.body.append(render_note);
}
const render_callback = svg => {
const container = document.getElementById('graph-container')
container.innerHTML = `${svg}<style>${css_data}</style>`;
const panZoom = svgPanZoom(container.children[0], {
zoomEnabled: true,
controlIconsEnabled: true,
maxZoom: 200,
minZoom: 0,
});
add_controls(svg);
};
hpccWasm.graphviz.layout(dot_data, "svg", "dot").then(render_callback);
</script>
</body>
</html>
)html",
{
{"$DOT", dot},
});
}
void RegisterGraphvizURLRenderer(
std::function<absl::StatusOr<std::string>(absl::string_view)> renderer) {
if (url_renderer != nullptr) {
LOG(WARNING) << "Multiple calls to RegisterGraphToURLRenderer. Last call "
"wins, but because order of initialization in C++ is "
"nondeterministic, this may not be what you want.";
}
delete url_renderer;
url_renderer =
new std::function<absl::StatusOr<std::string>(absl::string_view)>(
std::move(renderer));
}
}
} | #include "tensorflow/core/profiler/convert/hlo_proto_to_graph_view.h"
#include <string>
#include <variant>
#include "xla/service/hlo_graph_dumper.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/convert/tool_options.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
TEST(GraphViewerParamsTest, GraphType) {
ToolOptions options1;
options1["type"] = "graph";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params1,
ParseGraphViewerParams(options1));
EXPECT_EQ(params1.type, "graph");
EXPECT_EQ(params1.node_name, "");
EXPECT_EQ(params1.graph_width, 3);
EXPECT_EQ(params1.render_options.show_backend_config, false);
EXPECT_EQ(params1.render_options.show_fusion_subcomputations, true);
EXPECT_EQ(params1.format, xla::RenderedGraphFormat::kUrl);
ToolOptions options2;
options2["type"] = "graph";
options2["node_name"] = "fusion.111";
options2["graph_width"] = 10;
options2["show_metadata"] = 1;
options2["merge_fusion"] = 1;
options2["format"] = "html";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params2,
ParseGraphViewerParams(options2));
EXPECT_EQ(params2.type, "graph");
EXPECT_EQ(params2.node_name, "fusion.111");
EXPECT_EQ(params2.graph_width, 10);
EXPECT_EQ(params2.render_options.show_backend_config, true);
EXPECT_EQ(params2.render_options.show_fusion_subcomputations, false);
EXPECT_EQ(params2.format, xla::RenderedGraphFormat::kHtml);
}
TEST(GraphViewerParamsTest, ShortTxtType) {
ToolOptions options1;
options1["type"] = "short_txt";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params1,
ParseGraphViewerParams(options1));
EXPECT_EQ(params1.type, "short_txt");
EXPECT_EQ(params1.verbose, false);
EXPECT_EQ(params1.show_metadata, false);
ToolOptions options2;
options2["type"] = "short_txt";
options2["show_metadata"] = 1;
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params2,
ParseGraphViewerParams(options2));
EXPECT_EQ(params2.type, "short_txt");
EXPECT_EQ(params2.verbose, false);
EXPECT_EQ(params2.show_metadata, true);
}
TEST(GraphViewerParamsTest, LongTxtType) {
ToolOptions options1;
options1["type"] = "long_txt";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params1,
ParseGraphViewerParams(options1));
EXPECT_EQ(params1.type, "long_txt");
EXPECT_EQ(params1.verbose, true);
EXPECT_EQ(params1.show_metadata, false);
ToolOptions options2;
options2["type"] = "long_txt";
options2["show_metadata"] = 1;
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params2,
ParseGraphViewerParams(options2));
EXPECT_EQ(params2.type, "long_txt");
EXPECT_EQ(params2.verbose, true);
EXPECT_EQ(params2.show_metadata, true);
}
TEST(GraphViewerParamsTest, OtherTypes) {
ToolOptions options1;
EXPECT_THAT(ParseGraphViewerParams(options1),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Graph viewer must provide a type option")));
ToolOptions options2;
options2["type"] = "abcd";
EXPECT_THAT(ParseGraphViewerParams(options2),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Unknown graph viewer type option: abcd")));
}
}
}
} |
1,461 | cpp | tensorflow/tensorflow | xplane_to_op_metrics_db | tensorflow/core/profiler/convert/xplane_to_op_metrics_db.cc | tensorflow/core/profiler/convert/xplane_to_op_metrics_db_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_OP_METRICS_DB_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_OP_METRICS_DB_H_
#include "absl/container/flat_hash_map.h"
#include "absl/types/optional.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/convert/op_metrics_db_combiner.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/op_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/utils/tf_op_utils.h"
namespace tensorflow {
namespace profiler {
struct TfMetricsDbData {
OpMetricsDb tf_metrics_db;
HostOpMetricsDbBuilder tf_metrics_db_builder{&tf_metrics_db};
};
absl::flat_hash_map<int64_t, tsl::profiler::TfOp>
CollectTfOpsFromHostThreadsXPlane(const XPlane& host_trace);
TfMetricsDbData ConvertHostThreadsXLineToTfMetricsDbData(
const XLineVisitor& line,
const absl::flat_hash_map<int64_t, tsl::profiler::TfOp>& tf_ops);
void ConsumeTfMetricsDbData(TfMetricsDbData src, OpMetricsDbCombiner* dst);
OpMetricsDb ConvertHostThreadsXPlaneToOpMetricsDb(const XPlane& host_trace);
OpMetricsDb ConvertDeviceTraceXPlaneToOpMetricsDb(const XPlane& device_trace);
OpMetricsDb ConvertTpuDeviceTraceXPlaneToOpMetricsDb(
const XPlane& device_trace);
}
}
#endif
#include "tensorflow/core/profiler/convert/xplane_to_op_metrics_db.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/convert/op_metrics_db_combiner.h"
#include "tensorflow/core/profiler/convert/op_stack.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/cost_utils.h"
#include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include "tensorflow/core/profiler/utils/op_utils.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/utils/tf_op_utils.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
#include "tsl/profiler/utils/timespan.h"
#include "tsl/profiler/utils/xplane_schema.h"
namespace tensorflow {
namespace profiler {
namespace {
constexpr uint64_t kRootSymbolId = 0;
enum TfActivityType { kTfOpBegin, kTfOpEnd };
struct TfActivity {
uint64 timestamp_ps;
uint32 tf_op_id;
TfActivityType activity_type;
tsl::profiler::TfOp tf_op;
bool is_eager;
};
struct TfOpInfo {
explicit TfOpInfo(uint64 ts) : start_timestamp_ps(ts) {}
uint64 start_timestamp_ps;
uint64 children_duration_ps = 0;
};
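// Pushes op begin activities onto the stack; on the matching end activity,
// records the op's time (and its children's time) in the metrics DB and
// credits the span to the parent op still on the stack.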
void ProcessOneTfActivity(const TfActivity& activity,
OpStack<TfOpInfo>* tf_op_stack,
TfMetricsDbData* tf_metrics_data) {
uint32 tf_op_id = activity.tf_op_id;
switch (activity.activity_type) {
case kTfOpBegin: {
tf_op_stack->Push(tf_op_id,
std::make_unique<TfOpInfo>(activity.timestamp_ps));
break;
}
case kTfOpEnd: {
std::unique_ptr<TfOpInfo> info = tf_op_stack->Pop(tf_op_id);
if (info == nullptr) {
VLOG(1) << "No begin event found for TF activity id=" << tf_op_id
<< " name=" << activity.tf_op.name
<< " type=" << activity.tf_op.type;
break;
}
tsl::profiler::Timespan tf_op_span = tsl::profiler::PicoSpan(
info->start_timestamp_ps, activity.timestamp_ps);
tf_metrics_data->tf_metrics_db_builder.EnterOp(
activity.tf_op.name, activity.tf_op.type, activity.is_eager,
tf_op_span.duration_ps(), info->children_duration_ps);
TfOpInfo* parent_info = tf_op_stack->Top();
if (parent_info != nullptr) {
parent_info->children_duration_ps += tf_op_span.duration_ps();
}
if (tsl::profiler::IsInfeedEnqueueOp(activity.tf_op.type)) {
tf_metrics_data->tf_metrics_db_builder.EnterHostInfeedEnqueue(
tf_op_span);
}
break;
}
}
}
void ProcessTfActivities(std::vector<TfActivity>* tf_activities,
TfMetricsDbData* tf_metrics_db_data) {
if (tf_activities->empty()) return;
absl::c_stable_sort(*tf_activities,
[](const TfActivity& a, const TfActivity& b) {
return a.timestamp_ps < b.timestamp_ps;
});
OpStack<TfOpInfo> tf_op_stack;
for (const auto& tf_activity : *tf_activities) {
ProcessOneTfActivity(tf_activity, &tf_op_stack, tf_metrics_db_data);
}
SetTotalTimePs(
tf_metrics_db_data->tf_metrics_db,
tf_activities->back().timestamp_ps - tf_activities->front().timestamp_ps);
}
void CollectTfActivities(
const XLineVisitor& line,
const absl::flat_hash_map<int64_t, tsl::profiler::TfOp>& tf_ops,
std::vector<TfActivity>* tf_activities) {
uint32 tf_op_id = 0;
tf_activities->reserve(line.NumEvents() * 2);
line.ForEachEvent(
[&tf_ops, &tf_op_id, &tf_activities](const XEventVisitor& event) {
const tsl::profiler::TfOp* tf_op = gtl::FindOrNull(tf_ops, event.Id());
if (tf_op != nullptr) {
++tf_op_id;
bool is_eager = false;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kIsEager)) {
is_eager = stat->IntValue();
}
tsl::profiler::Timespan span = event.GetTimespan();
tf_activities->push_back(
{span.begin_ps(), tf_op_id, kTfOpBegin, *tf_op, is_eager});
tf_activities->push_back(
{span.end_ps(), tf_op_id, kTfOpEnd, *tf_op, is_eager});
}
});
}
}
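// Collects, keyed by event metadata id, the host-plane events whose names
// parse as TF ops (category != kUnknown).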
absl::flat_hash_map<int64_t, tsl::profiler::TfOp>
CollectTfOpsFromHostThreadsXPlane(const XPlane& host_trace) {
absl::flat_hash_map<int64_t, tsl::profiler::TfOp> tf_ops;
for (const auto& id_metadata : host_trace.event_metadata()) {
const XEventMetadata& metadata = id_metadata.second;
tsl::profiler::TfOp tf_op =
tsl::profiler::ParseTfOpFullname(metadata.name());
if (tf_op.category != tsl::profiler::Category::kUnknown) {
tf_ops.try_emplace(metadata.id(), tf_op);
}
}
return tf_ops;
}
TfMetricsDbData ConvertHostThreadsXLineToTfMetricsDbData(
const XLineVisitor& line,
const absl::flat_hash_map<int64_t, tsl::profiler::TfOp>& tf_ops) {
TfMetricsDbData tf_metrics_db_data;
if (!tf_ops.empty()) {
std::vector<TfActivity> tf_activities;
CollectTfActivities(line, tf_ops, &tf_activities);
ProcessTfActivities(&tf_activities, &tf_metrics_db_data);
}
return tf_metrics_db_data;
}
void ConsumeTfMetricsDbData(TfMetricsDbData src, OpMetricsDbCombiner* dst) {
AddIdleOp(src.tf_metrics_db);
dst->Combine(src.tf_metrics_db, false);
src.tf_metrics_db.Clear();
}
OpMetricsDb ConvertHostThreadsXPlaneToOpMetricsDb(const XPlane& host_trace) {
absl::flat_hash_map<int64_t, tsl::profiler::TfOp> tf_ops =
CollectTfOpsFromHostThreadsXPlane(host_trace);
OpMetricsDb result;
OpMetricsDbCombiner combiner(&result);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&host_trace);
plane.ForEachLine([&tf_ops, &combiner](const XLineVisitor& line) {
ConsumeTfMetricsDbData(
ConvertHostThreadsXLineToTfMetricsDbData(line, tf_ops), &combiner);
});
return result;
}
OpMetricsDb ConvertTpuDeviceTraceXPlaneToOpMetricsDb(
const XPlane& device_trace) {
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&device_trace);
  using OpMetricBySymbol = absl::flat_hash_map<uint64_t, OpMetrics>;
absl::flat_hash_map<uint64_t, OpMetricBySymbol> flat_op_metric;
XEventsOpMetricsDbBuilder builder;
plane.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent(
[&](const XEventVisitor& event) { builder.AddOpMetric(event); });
});
return builder.Finalize(
plane.GetStat(StatType::kTotalProfileDurationPs)->IntOrUintValue());
}
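// Builds a device OpMetricsDb from a (non-TPU) device trace: each kernel event
// is attributed to the TF op named in its kTfOp stat, with roofline flop/byte
// estimates added for ops whose category is known.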
OpMetricsDb ConvertDeviceTraceXPlaneToOpMetricsDb(const XPlane& device_trace) {
OpMetricsDb result;
DeviceOpMetricsDbBuilder device_op_metrics_db_builder(&result);
int64_t first_op_offset_ps = kint64max;
int64_t last_op_offset_ps = 0;
TfOpRoofLineCostEstimator op_level_cost_estimator;
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&device_trace);
plane.ForEachLine([&](const XLineVisitor& line) {
if (IsDerivedThreadId(line.Id())) return;
line.ForEachEvent([&](const XEventVisitor& event) {
first_op_offset_ps = std::min(first_op_offset_ps, event.OffsetPs());
last_op_offset_ps = std::max(last_op_offset_ps, event.EndOffsetPs());
absl::string_view tf_op_full_name;
bool is_eager = false;
int64_t program_id = 0;
absl::string_view deduplicated_name = "";
event.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Type() == StatType::kTfOp) {
tf_op_full_name = stat.StrOrRefValue();
} else if (stat.Type() == StatType::kIsEager) {
is_eager = stat.IntValue();
} else if (stat.Type() == StatType::kProgramId) {
program_id = stat.IntOrUintValue();
} else if (stat.Type() == StatType::kDeduplicatedName) {
deduplicated_name = stat.StrOrRefValue();
}
});
if (tf_op_full_name.empty()) return;
tsl::profiler::TfOp tf_op =
tsl::profiler::ParseTfOpFullname(tf_op_full_name);
TfOpRoofLineCostEstimator::OpRoofLineStats costs;
if (tf_op.category != tsl::profiler::Category::kUnknown) {
costs = op_level_cost_estimator.Predict(event);
}
device_op_metrics_db_builder.EnterOp(
program_id,
absl::StrCat(tf_op.name, "/", event.Name()),
tf_op.type,
tf_op_full_name, deduplicated_name, is_eager,
1, event.DurationPs(),
0, costs.flops, costs.bytes_accessed);
});
});
SetTotalTimePs(
result, last_op_offset_ps ? last_op_offset_ps - first_op_offset_ps : 0);
AddIdleOp(result);
return result;
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_op_metrics_db.h"
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
#if defined(PLATFORM_GOOGLE)
using ::testing::EqualsProto;
#endif
void AddTensorFlowTpuOpEvent(std::string&& name, std::string&& tf_op_fullname,
int64_t start_timestamp_ns, int64_t duration_ns,
std::string&& hlo_category, uint64 flops,
uint64 bytes_accessed, int64_t occurences,
int64_t self_duration, int64_t program_id,
int64_t symbol_id, XPlaneBuilder* plane,
XLineBuilder* line) {
XEventBuilder event = line->AddEvent(*plane->GetOrCreateEventMetadata(name));
event.SetTimestampNs(start_timestamp_ns);
event.SetDurationNs(duration_ns);
event.SetNumOccurrences(occurences);
XStatsBuilder<XEventMetadata> event_metadata(
plane->GetOrCreateEventMetadata(name), plane);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
tf_op_fullname);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kHloCategory)),
hlo_category);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kFlops)), flops);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kSymbolId)),
symbol_id);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kProgramId)),
program_id);
}
void AddTensorFlowOpEvent(std::string&& tf_op_fullname,
int64_t start_timestamp_ns, int64_t duration_ns,
bool on_device, absl::string_view kernel_name,
XPlaneBuilder* plane, XLineBuilder* line) {
absl::string_view name = on_device ? kernel_name : tf_op_fullname;
XEventBuilder event = line->AddEvent(*plane->GetOrCreateEventMetadata(name));
event.SetTimestampNs(start_timestamp_ns);
event.SetDurationNs(duration_ns);
if (!on_device) return;
event.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
*plane->GetOrCreateStatMetadata(std::move(tf_op_fullname)));
}
TEST(ConvertXPlaneToOpMetricsDb, HostOpMetricsDb) {
static constexpr char kTfOp1[] = "TfOp1";
static constexpr char kTfOp2[] = "TfOp2";
constexpr int64_t kTfOp1StartNs = 100000;
constexpr int64_t kTfOp1DurationNs = 8000;
constexpr int64_t kTfOp2StartNs = 110000;
constexpr int64_t kTfOp2DurationNs = 10000;
XSpace xspace;
XPlane* xplane = GetOrCreateHostXPlane(&xspace);
XPlaneBuilder host_plane(xplane);
XLineBuilder thread1 = host_plane.GetOrCreateLine(10);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kTfOp1StartNs,
kTfOp1DurationNs, false,
"", &host_plane, &thread1);
XLineBuilder thread2 = host_plane.GetOrCreateLine(20);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kTfOp1StartNs,
kTfOp1DurationNs, false,
"", &host_plane, &thread2);
AddTensorFlowOpEvent(absl::StrCat(kTfOp2, ":", kTfOp2), kTfOp2StartNs,
kTfOp2DurationNs, false,
"", &host_plane, &thread2);
OpMetricsDb op_metrics = ConvertHostThreadsXPlaneToOpMetricsDb(*xplane);
EXPECT_EQ(3, op_metrics.metrics_db_size());
uint64 total_op_duration =
tsl::profiler::NanoToPico(kTfOp1DurationNs * 2 + kTfOp2DurationNs);
EXPECT_EQ(total_op_duration, op_metrics.total_op_time_ps());
uint64 total_duration = tsl::profiler::NanoToPico(
kTfOp2StartNs - kTfOp1StartNs + kTfOp2DurationNs + kTfOp1DurationNs);
EXPECT_EQ(total_duration, op_metrics.total_time_ps());
const OpMetrics& op_1 = op_metrics.metrics_db().at(0);
EXPECT_EQ(kTfOp1, op_1.name());
EXPECT_EQ(kTfOp1, op_1.category());
EXPECT_EQ(2, op_1.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kTfOp1DurationNs) * 2, op_1.time_ps());
const OpMetrics& idle = op_metrics.metrics_db().at(1);
EXPECT_EQ(kIdle, idle.name());
EXPECT_EQ(kIdle, idle.category());
EXPECT_EQ(tsl::profiler::NanoToPico(2000), idle.time_ps());
const OpMetrics& op_2 = op_metrics.metrics_db().at(2);
EXPECT_EQ(kTfOp2, op_2.name());
EXPECT_EQ(kTfOp2, op_2.category());
EXPECT_EQ(1, op_2.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kTfOp2DurationNs), op_2.time_ps());
}
TEST(ConvertXPlaneToOpMetricsDb, DeviceOpMetricsDb) {
static constexpr char kTfOp1[] = "TfOp1";
static constexpr char kTfOp2[] = "TfOp2";
static constexpr char kKernel1[] = "kernel1";
static constexpr char kKernel2[] = "kernel2";
static constexpr char kKernel3[] = "kernel3";
constexpr int64_t kKernel1StartNs = 100000;
constexpr int64_t kKernel1DurationNs = 8000;
constexpr int64_t kKernel2StartNs = 110000;
constexpr int64_t kKernel2DurationNs = 10000;
constexpr int64_t kKernel3StartNs = 120000;
constexpr int64_t kKernel3DurationNs = 10000;
XSpace xspace;
XPlane* xplane = GetOrCreateGpuXPlane(&xspace, 0);
XPlaneBuilder device_plane(xplane);
XLineBuilder stream1 = device_plane.GetOrCreateLine(10);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel1StartNs,
kKernel1DurationNs, true, kKernel1,
&device_plane, &stream1);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel2StartNs,
kKernel2DurationNs, true, kKernel2,
&device_plane, &stream1);
XLineBuilder stream2 = device_plane.GetOrCreateLine(20);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel1StartNs,
kKernel1DurationNs, true, kKernel1,
&device_plane, &stream2);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel2StartNs,
kKernel2DurationNs, true, kKernel2,
&device_plane, &stream2);
AddTensorFlowOpEvent(absl::StrCat(kTfOp2, ":", kTfOp2), kKernel3StartNs,
kKernel3DurationNs, true, kKernel3,
&device_plane, &stream2);
OpMetricsDb op_metrics = ConvertDeviceTraceXPlaneToOpMetricsDb(*xplane);
EXPECT_EQ(4, op_metrics.metrics_db_size());
uint64 total_op_duration = tsl::profiler::NanoToPico(
kKernel1DurationNs * 2 + kKernel2DurationNs * 2 + kKernel3DurationNs);
EXPECT_EQ(total_op_duration, op_metrics.total_op_time_ps());
uint64 total_duration = tsl::profiler::NanoToPico(
kKernel3StartNs + kKernel3DurationNs - kKernel1StartNs);
EXPECT_EQ(std::max(total_duration, total_op_duration),
op_metrics.total_time_ps());
const OpMetrics& op_1 = op_metrics.metrics_db().at(0);
EXPECT_EQ(absl::StrCat(kTfOp1, "/", kKernel1), op_1.name());
EXPECT_EQ(kTfOp1, op_1.category());
EXPECT_EQ(2, op_1.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kKernel1DurationNs) * 2, op_1.time_ps());
const OpMetrics& op_2 = op_metrics.metrics_db().at(1);
EXPECT_EQ(absl::StrCat(kTfOp1, "/", kKernel2), op_2.name());
EXPECT_EQ(kTfOp1, op_2.category());
EXPECT_EQ(2, op_2.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kKernel2DurationNs) * 2, op_2.time_ps());
const OpMetrics& op_3 = op_metrics.metrics_db().at(2);
EXPECT_EQ(absl::StrCat(kTfOp2, "/", kKernel3), op_3.name());
EXPECT_EQ(kTfOp2, op_3.category());
EXPECT_EQ(1, op_3.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kKernel3DurationNs), op_3.time_ps());
const OpMetrics& idle = op_metrics.metrics_db().at(3);
EXPECT_EQ(kIdle, idle.name());
EXPECT_EQ(kIdle, idle.category());
EXPECT_EQ(tsl::profiler::NanoToPico(0), idle.time_ps());
}
TEST(ConvertXPlaneToOpMetricsDb, TpuDeviceOpMetricsDb) {
XSpace xspace;
XPlane* xplane = GetOrCreateTpuXPlane(&xspace, 0, "TPU V4",
0,
0);
XPlaneBuilder device_plane(xplane);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kTotalProfileDurationPs)),
1000);
XLineBuilder stream1 = device_plane.GetOrCreateLine(10);
AddTensorFlowTpuOpEvent("MatMul", "while:MatMul", 0, 10, "MatMul", 34, 45, 2,
5, 1, 1, &device_plane, &stream1);
OpMetricsDb op_metrics = ConvertTpuDeviceTraceXPlaneToOpMetricsDb(*xplane);
#if defined(PLATFORM_GOOGLE)
EXPECT_THAT(op_metrics,
EqualsProto(R"pb(metrics_db {
hlo_module_id: 1
self_time_ps: 10000
flops: 68
occurrences: 2
name: "MatMul"
time_ps: 10000
category: "MatMul"
provenance: "while:MatMul"
min_time_ps: 10000
}
metrics_db { name: "IDLE" category: "IDLE" }
total_time_ps: 10000
total_op_time_ps: 10000
)pb"));
#endif
}
}
}
} |
1,462 | cpp | tensorflow/tensorflow | repository | tensorflow/core/profiler/convert/repository.cc | tensorflow/core/profiler/convert/repository_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_REPOSITORY_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_REPOSITORY_H_
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/statusor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
#include "tsl/profiler/utils/file_system_utils.h"
namespace tensorflow {
namespace profiler {
constexpr char kAllHostsIdentifier[] = "ALL_HOSTS";
constexpr char kNoHostIdentifier[] = "NO_HOST";
enum StoredDataType {
DCN_COLLECTIVE_STATS,
};
static auto* kHostDataSuffixes =
new std::vector<std::pair<StoredDataType, const char*>>(
{{StoredDataType::DCN_COLLECTIVE_STATS, ".dcn_collective_stats.pb"}});
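// A profiling session: one XSpace per host, backed either by preloaded protos
// or by .xplane.pb files in the session run directory, plus helpers for
// locating per-host cache files such as DCN collective stats.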
class SessionSnapshot {
public:
static absl::StatusOr<SessionSnapshot> Create(
std::vector<std::string> xspace_paths,
std::optional<std::vector<std::unique_ptr<XSpace>>> xspaces);
size_t XSpaceSize() const { return xspace_paths_.size(); }
absl::StatusOr<std::unique_ptr<XSpace>> GetXSpace(size_t index) const;
absl::StatusOr<std::unique_ptr<XSpace>> GetXSpaceByName(
absl::string_view name) const;
std::string GetHostname(size_t index) const;
absl::string_view GetSessionRunDir() const { return session_run_dir_; }
bool HasAccessibleRunDir() const { return has_accessible_run_dir_; }
std::optional<std::string> GetFilePath(absl::string_view toolname,
absl::string_view host) const;
absl::StatusOr<std::string> GetHostDataFileName(StoredDataType data_type,
std::string host) const;
absl::StatusOr<std::optional<std::string>> GetHostDataFilePath(
StoredDataType data_type, std::string host) const;
absl::StatusOr<std::pair<bool, std::string>> HasCacheFile(
StoredDataType data_type) const;
template <typename T>
absl::Status WriteBinaryProto(const StoredDataType data_type,
const std::string host, T& proto) const {
TF_ASSIGN_OR_RETURN(std::string filename,
GetHostDataFileName(data_type, host));
std::string filepath =
tsl::profiler::ProfilerJoinPath(GetSessionRunDir(), filename);
return tensorflow::WriteBinaryProto(tsl::Env::Default(), filepath, proto);
}
template <typename T>
absl::Status ReadBinaryProto(const StoredDataType data_type,
const std::string host, T* proto) const {
TF_ASSIGN_OR_RETURN(std::optional<std::string> filepath,
GetHostDataFilePath(data_type, host));
if (filepath) {
return tensorflow::ReadBinaryProto(tsl::Env::Default(), filepath.value(),
proto);
}
return absl::NotFoundError(
absl::StrCat("No binary proto found for ", host, " and ", data_type));
}
private:
SessionSnapshot(std::vector<std::string> xspace_paths,
std::optional<std::vector<std::unique_ptr<XSpace>>> xspaces)
: xspace_paths_(std::move(xspace_paths)),
has_accessible_run_dir_(!xspaces.has_value()),
xspaces_(std::move(xspaces)) {
session_run_dir_ = tensorflow::io::Dirname(xspace_paths_.at(0));
for (size_t i = 0; i < xspace_paths_.size(); ++i) {
std::string host_name = GetHostname(i);
hostname_map_[host_name] = i;
}
}
std::vector<std::string> xspace_paths_;
absl::string_view session_run_dir_;
  absl::flat_hash_map<std::string, size_t> hostname_map_;
const bool has_accessible_run_dir_;
mutable std::optional<std::vector<std::unique_ptr<XSpace>>> xspaces_;
};
template <typename T>
absl::Status WriteBinaryProto(const SessionSnapshot& session_snapshot,
const StoredDataType data_type,
const std::string& host, T& proto) {
return session_snapshot.WriteBinaryProto(data_type, host, proto);
}
template <typename T>
absl::Status ReadBinaryProto(const SessionSnapshot& session_snapshot,
const StoredDataType data_type,
const std::string& host, T* proto) {
return session_snapshot.ReadBinaryProto(data_type, host, proto);
}
}
}
#endif
#include "tensorflow/core/profiler/convert/repository.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tsl/platform/errors.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
#include "tsl/profiler/utils/file_system_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
std::string GetHostnameByPath(absl::string_view xspace_path) {
std::string_view file_name = tensorflow::io::Basename(xspace_path);
absl::ConsumeSuffix(&file_name, ".xplane.pb");
return std::string(file_name);
}
}
absl::StatusOr<SessionSnapshot> SessionSnapshot::Create(
std::vector<std::string> xspace_paths,
std::optional<std::vector<std::unique_ptr<XSpace>>> xspaces) {
if (xspace_paths.empty()) {
return errors::InvalidArgument("Can not find XSpace path.");
}
if (xspaces.has_value()) {
if (xspaces->size() != xspace_paths.size()) {
return errors::InvalidArgument(
"The size of the XSpace paths: ", xspace_paths.size(),
" is not equal ",
"to the size of the XSpace proto: ", xspaces->size());
}
for (size_t i = 0; i < xspace_paths.size(); ++i) {
auto host_name = GetHostnameByPath(xspace_paths.at(i));
if (xspaces->at(i)->hostnames_size() > 0 && !host_name.empty()) {
if (!absl::StrContains(host_name, xspaces->at(i)->hostnames(0))) {
return errors::InvalidArgument(
"The hostname of xspace path and preloaded xpace don't match at "
"index: ",
i, ". \nThe host name of xpace path is ", host_name,
" but the host name of preloaded xpace is ",
xspaces->at(i)->hostnames(0), ".");
}
}
}
}
return SessionSnapshot(std::move(xspace_paths), std::move(xspaces));
}
absl::StatusOr<std::unique_ptr<XSpace>> SessionSnapshot::GetXSpace(
size_t index) const {
if (index >= xspace_paths_.size()) {
return errors::InvalidArgument("Can not get the ", index,
"th XSpace. The total number of XSpace is ",
xspace_paths_.size());
}
if (xspaces_.has_value()) {
if (xspaces_->at(index) == nullptr) {
return errors::Internal("");
}
return std::move(xspaces_->at(index));
}
auto xspace_from_file = std::make_unique<XSpace>();
TF_RETURN_IF_ERROR(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
xspace_paths_.at(index),
xspace_from_file.get()));
return xspace_from_file;
}
absl::StatusOr<std::unique_ptr<XSpace>> SessionSnapshot::GetXSpaceByName(
absl::string_view name) const {
if (auto it = hostname_map_.find(name); it != hostname_map_.end()) {
return GetXSpace(it->second);
}
return errors::InvalidArgument("Can not find the XSpace by name: ", name,
". The total number of XSpace is ",
xspace_paths_.size());
}
std::string SessionSnapshot::GetHostname(size_t index) const {
return GetHostnameByPath(xspace_paths_.at(index));
}
std::optional<std::string> SessionSnapshot::GetFilePath(
absl::string_view toolname, absl::string_view hostname) const {
if (!has_accessible_run_dir_) return std::nullopt;
std::string file_name = "";
if (toolname == "trace_viewer@")
file_name = absl::StrCat(hostname, ".", "SSTABLE");
if (!file_name.empty())
return tensorflow::io::JoinPath(session_run_dir_, file_name);
return std::nullopt;
}
absl::StatusOr<std::string> SessionSnapshot::GetHostDataFileName(
const StoredDataType data_type, const std::string host) const {
for (const auto& format : *kHostDataSuffixes) {
if (data_type == format.first) return absl::StrCat(host, format.second);
}
  return absl::InternalError(
      absl::StrCat("Unknown StoredDataType: ", data_type));
}
absl::StatusOr<std::optional<std::string>> SessionSnapshot::GetHostDataFilePath(
const StoredDataType data_type, const std::string host) const {
std::vector<std::string> results;
TF_RETURN_IF_ERROR(::tsl::Env::Default()->GetChildren(
std::string(GetSessionRunDir()), &results));
TF_ASSIGN_OR_RETURN(std::string filename,
GetHostDataFileName(data_type, host));
for (const std::string& path : results) {
if (absl::EndsWith(path, filename)) {
return ::tsl::profiler::ProfilerJoinPath(GetSessionRunDir(), filename);
}
}
return std::nullopt;
}
absl::StatusOr<std::pair<bool, std::string>> SessionSnapshot::HasCacheFile(
const StoredDataType data_type) const {
std::optional<std::string> filepath;
TF_ASSIGN_OR_RETURN(filepath,
GetHostDataFilePath(data_type, kNoHostIdentifier));
if (filepath) {
return std::pair<bool, std::string>(true, std::string());
}
TF_ASSIGN_OR_RETURN(filepath,
GetHostDataFilePath(data_type, kAllHostsIdentifier));
if (filepath) {
return std::pair<bool, std::string>(true, filepath.value());
}
return std::pair<bool, std::string>(false, std::string());
}
}
} | #include "tensorflow/core/profiler/convert/repository.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/errors.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::Eq;
TEST(Repository, GetHostName) {
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/hostname0.xplane.pb",
"log/plugins/profile/hostname1.xplane.pb"},
std::nullopt);
TF_CHECK_OK(session_snapshot_or.status());
EXPECT_THAT(session_snapshot_or.value().GetHostname(0), Eq("hostname0"));
EXPECT_THAT(session_snapshot_or.value().GetHostname(1), Eq("hostname1"));
EXPECT_TRUE(session_snapshot_or.value().HasAccessibleRunDir());
}
TEST(Repository, GetHostNameWithPeriods) {
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/127.0.0.1_6009.xplane.pb"},
std::nullopt);
TF_CHECK_OK(session_snapshot_or.status());
EXPECT_THAT(session_snapshot_or.value().GetHostname(0), Eq("127.0.0.1_6009"));
EXPECT_TRUE(session_snapshot_or.value().HasAccessibleRunDir());
}
TEST(Repository, GetSpaceByHostName) {
std::vector<std::unique_ptr<XSpace>> xspaces;
auto space1 = std::make_unique<XSpace>();
*(space1->add_hostnames()) = "hostname1";
xspaces.push_back(std::move(space1));
auto space0 = std::make_unique<XSpace>();
*(space0->add_hostnames()) = "hostname0";
xspaces.push_back(std::move(space0));
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/hostname1.xplane.pb",
"log/plugins/profile/hostname0.xplane.pb"},
std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
auto xspace0_or = session_snapshot_or.value().GetXSpaceByName("hostname0");
TF_CHECK_OK(xspace0_or.status());
auto xspace1_or = session_snapshot_or.value().GetXSpaceByName("hostname1");
EXPECT_FALSE(session_snapshot_or.value().HasAccessibleRunDir());
TF_CHECK_OK(xspace1_or.status());
EXPECT_THAT(xspace0_or.value()->hostnames(0), Eq("hostname0"));
EXPECT_THAT(xspace1_or.value()->hostnames(0), Eq("hostname1"));
}
TEST(Repository, GetSSTableFile) {
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/hostname0.xplane.pb"},
std::nullopt);
TF_CHECK_OK(session_snapshot_or.status());
auto sstable_path =
session_snapshot_or.value().GetFilePath("trace_viewer@", "hostname0");
auto not_found_path =
session_snapshot_or.value().GetFilePath("memory_viewer", "hostname0");
EXPECT_THAT(sstable_path, Eq("log/plugins/profile/hostname0.SSTABLE"));
EXPECT_THAT(not_found_path, Eq(std::nullopt));
}
TEST(Repository, GetSSTableFileWithXSpace) {
std::vector<std::unique_ptr<XSpace>> xspaces;
auto space0 = std::make_unique<XSpace>();
*(space0->add_hostnames()) = "hostname0";
xspaces.push_back(std::move(space0));
auto session_snapshot_or = SessionSnapshot::Create(
{"log/plugins/profile/hostname0.xplane.pb"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
auto file_path_init_by_xspace =
session_snapshot_or.value().GetFilePath("trace_viewer@", "hostname0");
EXPECT_THAT(file_path_init_by_xspace, Eq(std::nullopt));
}
TEST(Repository, MismatchedXSpaceAndPath) {
std::vector<std::unique_ptr<XSpace>> xspaces;
auto space1 = std::make_unique<XSpace>();
*(space1->add_hostnames()) = "hostname1";
xspaces.push_back(std::move(space1));
auto space0 = std::make_unique<XSpace>();
*(space0->add_hostnames()) = "hostname0";
xspaces.push_back(std::move(space0));
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/hostname0.xplane.pb",
"log/plugins/profile/hostname1.xplane.pb"},
std::move(xspaces));
auto error =
R"(The hostname of xspace path and preloaded xpace don't match at index: 0.
The host name of xpace path is hostname0 but the host name of preloaded xpace is hostname1.)";
EXPECT_THAT(session_snapshot_or.status(), Eq(errors::InvalidArgument(error)));
}
}
}
} |
1,463 | cpp | tensorflow/tensorflow | xplane_to_tool_names | tensorflow/core/profiler/convert/xplane_to_tool_names.cc | tensorflow/core/profiler/convert/xplane_to_tool_names_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_TOOL_NAMES_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_TOOL_NAMES_H_
#include <string>
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/repository.h"
namespace tensorflow {
namespace profiler {
absl::StatusOr<std::string> GetAvailableToolNames(
const SessionSnapshot& session_snapshot);
}
}
#endif
#include "tensorflow/core/profiler/convert/xplane_to_tool_names.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/str_join.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.h"
#include "tensorflow/core/profiler/convert/xplane_to_hlo.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
namespace tensorflow {
namespace profiler {
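// Returns a comma-separated list of the profiler tools that can be served for
// this session; GPU planes, HLO protos, and DCN collective stats each enable
// additional tools.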
absl::StatusOr<std::string> GetAvailableToolNames(
const SessionSnapshot& session_snapshot) {
std::vector<std::string> tools;
bool is_cloud_vertex_ai = !session_snapshot.HasAccessibleRunDir();
if (session_snapshot.XSpaceSize() != 0) {
tools.reserve(11);
tools.push_back(is_cloud_vertex_ai ? "trace_viewer" : "trace_viewer@");
tools.push_back("overview_page");
tools.push_back("input_pipeline_analyzer");
tools.push_back("framework_op_stats");
tools.push_back("memory_profile");
tools.push_back("pod_viewer");
tools.push_back("tf_data_bottleneck_analysis");
tools.push_back("op_profile");
TF_ASSIGN_OR_RETURN(std::unique_ptr<XSpace> xspace,
session_snapshot.GetXSpace(0));
if (!FindPlanesWithPrefix(*xspace, kGpuPlanePrefix).empty()) {
tools.push_back("kernel_stats");
}
TF_ASSIGN_OR_RETURN(bool has_hlo,
ConvertMultiXSpaceToHloProto(session_snapshot));
if (has_hlo) {
tools.push_back("memory_viewer");
tools.push_back("graph_viewer");
}
TF_ASSIGN_OR_RETURN(bool has_dcn_collective_stats,
HasDcnCollectiveStatsInMultiXSpace(session_snapshot));
if (has_dcn_collective_stats) {
tools.push_back("dcn_collective_stats");
}
}
return absl::StrJoin(tools, ",");
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_tool_names.h"
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
struct XPlaneToToolsTestCase {
std::string test_name;
std::string_view plane_name;
bool has_hlo_module;
bool has_dcn_collective_stats;
std::vector<std::string> expected_tools;
};
SessionSnapshot CreateSessionSnapshot(std::unique_ptr<XSpace> xspace,
bool has_hlo_module,
bool has_dcn_collective_stats) {
std::string test_name =
::testing::UnitTest::GetInstance()->current_test_info()->name();
std::string path = absl::StrCat("ram:
std::unique_ptr<WritableFile> xplane_file;
tensorflow::Env::Default()
->NewAppendableFile(absl::StrCat(path, "hostname.xplane.pb"),
&xplane_file)
.IgnoreError();
std::vector<std::string> paths = {path};
if (has_hlo_module) {
tensorflow::Env::Default()
->NewAppendableFile(absl::StrCat(path, "module_name.hlo_proto.pb"),
&xplane_file)
.IgnoreError();
} else {
tensorflow::Env::Default()
->NewAppendableFile(absl::StrCat(path, "NO_MODULE.hlo_proto.pb"),
&xplane_file)
.IgnoreError();
}
if (has_dcn_collective_stats) {
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "hostname.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "ALL_HOSTS.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
} else {
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "NO_HOST.dcn_collective_stats.pb"), &xplane_file)
.IgnoreError();
}
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(xspace));
absl::StatusOr<SessionSnapshot> session_snapshot =
SessionSnapshot::Create(paths, std::move(xspaces));
TF_CHECK_OK(session_snapshot.status());
return std::move(session_snapshot.value());
}
using XPlaneToToolsTest = ::testing::TestWithParam<XPlaneToToolsTestCase>;
TEST_P(XPlaneToToolsTest, ToolsList) {
const XPlaneToToolsTestCase& test_case = GetParam();
auto xspace = std::make_unique<XSpace>();
FindOrAddMutablePlaneWithName(xspace.get(), test_case.plane_name);
SessionSnapshot sessionSnapshot =
CreateSessionSnapshot(std::move(xspace), test_case.has_hlo_module,
test_case.has_dcn_collective_stats);
absl::StatusOr<std::string> toolsString =
GetAvailableToolNames(sessionSnapshot);
ASSERT_TRUE(toolsString.ok());
std::vector<std::string> tools = absl::StrSplit(toolsString.value(), ',');
std::vector<std::string> expected_tools = {"trace_viewer",
"overview_page",
"input_pipeline_analyzer",
"framework_op_stats",
"memory_profile",
"pod_viewer",
"tf_data_bottleneck_analysis",
"op_profile"};
expected_tools.insert(expected_tools.end(), test_case.expected_tools.begin(),
test_case.expected_tools.end());
EXPECT_THAT(tools, ::testing::UnorderedElementsAreArray(expected_tools));
}
INSTANTIATE_TEST_SUITE_P(
XPlaneToToolsTests, XPlaneToToolsTest,
::testing::ValuesIn<XPlaneToToolsTestCase>({
{"ToolsForTpuWithoutHloModule", kTpuPlanePrefix, false, false, {}},
{"ToolsForTpuWithHloModule",
kTpuPlanePrefix,
true,
false,
{"graph_viewer", "memory_viewer"}},
{"ToolsForGpuWithoutHloModule",
kGpuPlanePrefix,
false,
false,
{"kernel_stats"}},
{"ToolsForGpuWithHloModule",
kGpuPlanePrefix,
true,
false,
{"kernel_stats", "graph_viewer", "memory_viewer"}},
{"ToolsForTpuWithDcnCollectiveStats",
kTpuPlanePrefix,
false,
true,
{"dcn_collective_stats"}},
}),
[](const ::testing::TestParamInfo<XPlaneToToolsTest::ParamType>& info) {
return info.param.test_name;
});
}
}
} |
1,464 | cpp | tensorflow/tensorflow | hlo_proto_to_memory_visualization_utils | tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils.cc | tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_HLO_PROTO_TO_MEMORY_VISUALIZATION_UTILS_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_HLO_PROTO_TO_MEMORY_VISUALIZATION_UTILS_H_
#include <cstdint>
#include "absl/status/statusor.h"
#include "xla/service/hlo.pb.h"
#include "tensorflow/core/profiler/protobuf/memory_viewer_preprocess.pb.h"
namespace tensorflow {
namespace profiler {
constexpr int kSmallBufferSize = 16 * 1024;
absl::StatusOr<PreprocessResult> ConvertHloProtoToPreprocessResult(
const xla::HloProto& hlo_proto,
int64_t small_buffer_size = kSmallBufferSize, int64_t memory_color = 0);
}
}
#endif
#include "tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/layout_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/profiler/protobuf/memory_viewer_preprocess.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::xla::BufferAllocationProto;
using ::xla::HeapSimulatorTrace;
using ::xla::HloInstructionProto;
using ::xla::HloProto;
using ::xla::LayoutUtil;
using ::xla::LogicalBufferProto;
using ::xla::Shape;
using ::xla::ShapeUtil;
Shape ResolveShapeIndex(const xla::ShapeProto& shape_proto,
absl::Span<const int64_t> shape_index) {
if (shape_index.empty()) return Shape(shape_proto);
int64_t i = shape_index.back();
if (i >= shape_proto.tuple_shapes_size()) {
return Shape(shape_proto);
}
return Shape(shape_proto.tuple_shapes(i));
}
std::string ShapeDescription(const Shape& shape) {
return ShapeUtil::HumanStringWithLayout(shape);
}
int64_t ShapeUnpaddedSize(Shape shape) {
LayoutUtil::SetToDefaultLayout(&shape);
return ShapeUtil::ByteSizeOf(shape, sizeof(void*));
}
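// Wraps a BufferAllocationProto, classifying the allocation (parameter,
// output, thread-local, constant, or temporary) and remembering which heap
// simulator trace, if any, covers it.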
class BufferAllocationStruct {
public:
explicit BufferAllocationStruct(const BufferAllocationProto& proto)
      : buffer_allocation_(proto) {}
bool IsIndefinite() const {
return buffer_allocation_.is_thread_local() ||
buffer_allocation_.is_entry_computation_parameter() ||
buffer_allocation_.is_constant() ||
buffer_allocation_.maybe_live_out();
}
const BufferAllocationProto& proto() const { return buffer_allocation_; }
size_t size() const { return buffer_allocation_.size(); }
int64_t color() const { return buffer_allocation_.color(); }
int64_t index() const { return buffer_allocation_.index(); }
std::optional<int64_t> heap_simulator_trace_id() const {
return heap_simulator_trace_id_;
}
void set_heap_simulator_trace_id(int64_t id) {
heap_simulator_trace_id_ = id;
}
std::string category() const {
if (buffer_allocation_.is_entry_computation_parameter()) {
return "Parameter";
} else if (buffer_allocation_.maybe_live_out()) {
return "Output";
} else if (buffer_allocation_.is_thread_local()) {
return "Thread-local";
} else if (buffer_allocation_.is_constant()) {
return "Constant";
} else {
return "Temporary";
}
}
std::string description() const {
return absl::StrFormat(
"buffer_allocation_id:%d\nsize:%d\nbuffer_counts:%d\n",
buffer_allocation_.index(), size(), buffer_allocation_.assigned_size());
}
private:
const BufferAllocationProto& buffer_allocation_;
std::optional<int64_t> heap_simulator_trace_id_;
};
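// Describes a logical buffer: its defining HLO instruction, resolved shape,
// offset within the owning buffer allocation, live span on the heap timeline,
// and reference counting for buffers shared via SHARE_WITH events.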
struct LogicalBufferStruct {
LogicalBufferStruct(const LogicalBufferProto& p,
const BufferAllocationStruct& b,
const ::xla::HloInstructionProto& i, uint64_t offset)
: proto(p),
buffer_allocation(b),
hlo_instruction(i),
offset(offset),
shape(ResolveShapeIndex(hlo_instruction.shape(),
proto.defined_at().shape_index())) {}
absl::string_view instruction_name() const { return hlo_instruction.name(); }
int64_t color() const { return proto.color(); }
size_t size() const { return proto.size(); }
size_t unpadded_size() const { return ShapeUnpaddedSize(shape); }
int64_t inc() {
if (canonical_buffer) return canonical_buffer->inc();
return ++ref_count;
}
int64_t dec() {
if (canonical_buffer) return canonical_buffer->dec();
return --ref_count;
}
int64_t share_with(LogicalBufferStruct* buffer) {
canonical_buffer = buffer;
return canonical_buffer->inc();
}
LogicalBufferStruct* get_canonical_buffer() {
return canonical_buffer ? canonical_buffer->get_canonical_buffer() : this;
}
std::string GetInstructionNameWithShapeIndex() const {
if (proto.defined_at().shape_index().empty()) {
return std::string(instruction_name());
} else {
return absl::StrCat(instruction_name(), "{",
absl::StrJoin(proto.defined_at().shape_index(), ","),
"}");
}
}
std::string description() const {
return absl::StrFormat(
"buffer_id:%d\nhlo_op:%s\nshape:%s\nsize:%d\nunpadded_size:%d\n"
"offset:%d\nspan:(%lld,%lld)",
proto.id(), instruction_name(), ShapeDescription(shape), size(),
unpadded_size(), offset, span ? span->first : -1,
span ? span->second : -1);
}
const LogicalBufferProto& proto;
const BufferAllocationStruct& buffer_allocation;
const ::xla::HloInstructionProto& hlo_instruction;
uint64_t offset;
std::optional<std::pair<uint64_t, uint64_t>> span;
xla::Shape shape;
int64_t ref_count = 0;
LogicalBufferStruct* canonical_buffer = nullptr;
};
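// Indexes the logical buffers and buffer allocations of an HloProto so they
// can be looked up by id while replaying heap simulator traces.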
class HloProtoBufferWrapper {
public:
explicit HloProtoBufferWrapper(const ::xla::HloProto& hlo_proto)
: hlo_proto_(hlo_proto) {
Init();
}
int64_t GetHeapSimulatorTraceId(const int64_t memory_color) const {
int64_t id = GetHeapSimulatorTraceIdFromBufferAllocationIndex(memory_color);
if (id != -1) {
return id;
}
return GetHeapSimulatorTraceIdFromEvents(memory_color);
}
const ::xla::HloProto& GetHloProto() const { return hlo_proto_; }
std::vector<const BufferAllocationStruct*> GetBufferAllocations(
int64_t memory_color) const {
std::vector<const BufferAllocationStruct*> buffer_allocations;
for (const auto& iter : id_to_buffer_allocation_) {
if (iter.second->proto().color() != memory_color) continue;
buffer_allocations.push_back(iter.second.get());
}
return buffer_allocations;
}
LogicalBufferStruct* GetLogicalBuffer(int64_t logical_buffer_id) const {
if (!id_to_logical_buffer_.contains(logical_buffer_id)) {
LOG(DFATAL) << "logical_buffer_id " << logical_buffer_id << "not found.";
return nullptr;
}
return id_to_logical_buffer_.at(logical_buffer_id).get();
}
std::vector<const LogicalBufferStruct*> LogicalBuffersWithIndefiniteLifetime(
int64_t memory_color) const {
std::vector<const LogicalBufferStruct*> indefinite_logical_buffers;
for (const auto& buffer_assignment : GetBufferAllocations(memory_color)) {
if (!buffer_assignment->IsIndefinite()) continue;
if (buffer_assignment->proto().is_thread_local()) continue;
const LogicalBufferStruct* best_logical_buffer = nullptr;
size_t best_size = 0;
for (const auto& assigned : buffer_assignment->proto().assigned()) {
const LogicalBufferStruct* logical_buffer_struct =
GetLogicalBuffer(assigned.logical_buffer_id());
if (logical_buffer_struct == nullptr) continue;
if (logical_buffer_struct->size() > best_size) {
best_size = logical_buffer_struct->size();
best_logical_buffer = logical_buffer_struct;
}
}
if (best_logical_buffer) {
indefinite_logical_buffers.push_back(best_logical_buffer);
}
}
return indefinite_logical_buffers;
}
private:
void Init() {
absl::flat_hash_map<absl::string_view, const ::xla::HloInstructionProto*>
name_to_hlo;
absl::flat_hash_map<uint64_t, const ::xla::HloInstructionProto*>
unique_id_to_hlo;
for (const auto& computation : hlo_proto_.hlo_module().computations()) {
for (const auto& instruction : computation.instructions()) {
name_to_hlo[instruction.name()] = &instruction;
unique_id_to_hlo[instruction.id()] = &instruction;
}
}
absl::flat_hash_map<int64_t, const LogicalBufferProto*>
id_to_logical_buffer_proto;
for (const auto& logical_buffer :
hlo_proto_.buffer_assignment().logical_buffers()) {
id_to_logical_buffer_proto[logical_buffer.id()] = &logical_buffer;
}
for (const auto& buffer_allocation :
hlo_proto_.buffer_assignment().buffer_allocations()) {
auto& buffer_allocation_s =
id_to_buffer_allocation_[buffer_allocation.index()];
buffer_allocation_s =
std::make_unique<BufferAllocationStruct>(buffer_allocation);
for (const auto& assigned : buffer_allocation.assigned()) {
const auto id = assigned.logical_buffer_id();
if (!id_to_logical_buffer_proto.contains(id)) {
LOG(DFATAL) << "logical_buffer_id " << id << " not found.";
continue;
}
const auto* logical_buffer = id_to_logical_buffer_proto.at(id);
int64_t inst_id = logical_buffer->defined_at().instruction_id();
if (!unique_id_to_hlo.contains(inst_id)) {
LOG(DFATAL) << "instruction_id " << inst_id << " not found.";
continue;
}
const auto* instruction = unique_id_to_hlo.at(inst_id);
id_to_logical_buffer_[id] = std::make_unique<LogicalBufferStruct>(
*logical_buffer, *buffer_allocation_s, *instruction,
assigned.offset());
}
}
const auto& heap_simulator_traces =
hlo_proto_.buffer_assignment().heap_simulator_traces();
for (int64_t i = 0; i < heap_simulator_traces.size(); i++) {
if (heap_simulator_traces[i].events().empty()) continue;
int logical_buffer_id = heap_simulator_traces[i].events(0).buffer_id();
if (!id_to_logical_buffer_.contains(logical_buffer_id)) continue;
auto* logical_buffer = id_to_logical_buffer_[logical_buffer_id].get();
auto buffer_allocation_index = logical_buffer->buffer_allocation.index();
id_to_buffer_allocation_[buffer_allocation_index]
->set_heap_simulator_trace_id(i);
}
}
int64_t GetHeapSimulatorTraceIdFromEvents(const int64_t memory_color) const {
int64_t best_index = -1;
int64_t best_event_count = 0;
for (int64_t i = 0;
i < hlo_proto_.buffer_assignment().heap_simulator_traces_size(); i++) {
const auto& heap_simulator_trace =
hlo_proto_.buffer_assignment().heap_simulator_traces(i);
int64_t event_count = 0;
for (const auto& event : heap_simulator_trace.events()) {
if (!id_to_logical_buffer_.contains(event.buffer_id())) {
LOG(DFATAL) << "buffer_id " << event.buffer_id() << "not found.";
continue;
}
const auto& logical_buffer =
id_to_logical_buffer_.at(event.buffer_id());
if (logical_buffer->color() == memory_color) {
event_count++;
}
}
if (event_count > best_event_count) {
best_index = i;
best_event_count = event_count;
}
}
return best_index;
}
int64_t GetHeapSimulatorTraceIdFromBufferAllocationIndex(
const int64_t memory_color) const {
auto buffer_allocations = GetBufferAllocations(memory_color);
for (const auto* buffer_allocation : buffer_allocations) {
if (buffer_allocation->IsIndefinite()) continue;
if (buffer_allocation->heap_simulator_trace_id()) {
return *buffer_allocation->heap_simulator_trace_id();
}
}
return -1;
}
const ::xla::HloProto& hlo_proto_;
absl::flat_hash_map<int64_t, std::unique_ptr<LogicalBufferStruct>>
id_to_logical_buffer_;
absl::flat_hash_map<int64_t, std::unique_ptr<BufferAllocationStruct>>
id_to_buffer_allocation_;
};
double BytesToMiB(int64_t bytes) {
return static_cast<double>(bytes) / (1ULL << 20);
}
HeapObject MakeHeapObjectCommon(std::string label, int32_t color,
int64_t logical_buffer_id,
int64_t logical_buffer_size_bytes,
int64_t unpadded_shape_bytes) {
HeapObject result;
result.set_numbered(color);
result.set_label(std::move(label));
result.set_logical_buffer_id(logical_buffer_id);
result.set_logical_buffer_size_mib(BytesToMiB(logical_buffer_size_bytes));
result.set_unpadded_shape_mib(BytesToMiB(unpadded_shape_bytes));
return result;
}
HeapObject MakeHeapObject(const LogicalBufferStruct& logical_buffer,
int32_t color) {
const HloInstructionProto& hlo_instruction = logical_buffer.hlo_instruction;
std::string shape_string = ShapeDescription(logical_buffer.shape);
std::string label =
absl::StrFormat("%s: %s # %s", logical_buffer.instruction_name(),
shape_string, hlo_instruction.metadata().op_name());
HeapObject result = MakeHeapObjectCommon(
std::move(label), color, logical_buffer.proto.id(), logical_buffer.size(),
logical_buffer.unpadded_size());
result.set_instruction_name(
logical_buffer.GetInstructionNameWithShapeIndex());
result.set_group_name(logical_buffer.buffer_allocation.category());
result.set_tf_op_name(hlo_instruction.metadata().op_name());
result.set_shape_string(shape_string);
result.set_op_code(hlo_instruction.opcode());
return result;
}
BufferSpan MakeBufferSpan(int32 start, int32 limit) {
BufferSpan result;
result.set_start(start);
result.set_limit(limit);
return result;
}
void Convert(const xla::BufferAllocationProto_Assigned& assigned,
const HloProtoBufferWrapper& wrapper, LogicalBuffer* result) {
  result->set_id(assigned.logical_buffer_id());
  result->set_size_mib(BytesToMiB(assigned.size()));
const LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(assigned.logical_buffer_id());
if (logical_buffer == nullptr) return;
result->set_hlo_name(std::string(logical_buffer->instruction_name()));
result->mutable_shape_index()->CopyFrom(
logical_buffer->proto.defined_at().shape_index());
result->set_shape(ShapeDescription(logical_buffer->shape));
}
bool IsReusable(const BufferAllocationProto& buffer_allocation) {
return !buffer_allocation.is_thread_local() && !buffer_allocation.is_tuple();
}
void Convert(const BufferAllocationProto& proto,
const HloProtoBufferWrapper& wrapper, BufferAllocation* result) {
result->set_id(proto.index());
result->set_size_mib(BytesToMiB(proto.size()));
if (proto.is_entry_computation_parameter()) {
result->add_attributes("entry computation parameter");
}
if (proto.maybe_live_out()) {
result->add_attributes("may-be live out");
}
if (IsReusable(proto)) {
result->add_attributes("reusable");
}
for (const auto& assigned : proto.assigned()) {
Convert(assigned, wrapper, result->add_logical_buffers());
}
if (!result->logical_buffers().empty()) {
std::string common_shape = result->logical_buffers(0).shape();
for (int64_t i = 1; i < result->logical_buffers_size(); ++i) {
if (result->logical_buffers(i).shape() != common_shape) {
common_shape = "";
break;
}
}
if (!common_shape.empty()) {
result->set_common_shape(common_shape);
}
}
}
void NoteSpecialAllocations(const HloProtoBufferWrapper& wrapper,
int64_t memory_color, int64_t small_buffer_size,
PreprocessResult* result) {
int64_t entry_parameters_bytes = 0;
int64_t non_reusable_bytes = 0;
int64_t maybe_live_out_bytes = 0;
int64_t total_buffer_allocation_bytes = 0;
int64_t indefinite_buffer_allocation_bytes = 0;
for (const auto* buffer_allocation_struct :
wrapper.GetBufferAllocations(memory_color)) {
const auto& buffer_allocation = buffer_allocation_struct->proto();
if (buffer_allocation.is_entry_computation_parameter()) {
entry_parameters_bytes += buffer_allocation.size();
}
if (!IsReusable(buffer_allocation)) {
non_reusable_bytes += buffer_allocation.size();
}
if (buffer_allocation.maybe_live_out()) {
if (buffer_allocation.size() > small_buffer_size) {
VLOG(1) << "Maybe live out buffer allocation: "
<< buffer_allocation.size()
<< " bytes :: " << buffer_allocation.ShortDebugString();
}
maybe_live_out_bytes += buffer_allocation.size();
}
if (buffer_allocation_struct->IsIndefinite()) {
indefinite_buffer_allocation_bytes += buffer_allocation.size();
Convert(buffer_allocation, wrapper, result->add_indefinite_lifetimes());
}
total_buffer_allocation_bytes += buffer_allocation.size();
}
result->set_entry_computation_parameters_mib(
BytesToMiB(entry_parameters_bytes));
result->set_non_reusable_mib(BytesToMiB(non_reusable_bytes));
result->set_maybe_live_out_mib(BytesToMiB(maybe_live_out_bytes));
result->set_total_buffer_allocation_mib(
BytesToMiB(total_buffer_allocation_bytes));
result->set_indefinite_buffer_allocation_mib(
BytesToMiB(indefinite_buffer_allocation_bytes));
}
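// Tracks heap size over time while replaying heap simulator events, recording
// the peak heap size and the logical buffers live at that peak.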
struct HeapSimulatorStats {
explicit HeapSimulatorStats(const HloProtoBufferWrapper& wrapper)
: wrapper(wrapper) {}
void SetSimulatorTraceEventSize(int64_t size) {
simulator_trace_event_size = size;
}
void UpdateOnSimulatorEvent(const HeapSimulatorTrace::Event& event) {
heap_size_bytes_timeline.push_back(heap_size_bytes);
unpadded_heap_size_bytes_timeline.push_back(unpadded_heap_size_bytes);
const LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(event.buffer_id());
if (logical_buffer == nullptr) return;
seen_logical_buffers.insert(logical_buffer);
seen_buffer_allocations.insert(&logical_buffer->buffer_allocation.proto());
}
void IncreaseMemoryUsage(LogicalBufferStruct* canonical_logical_buffer,
bool init_buffer_span) {
logical_buffers.push_back(canonical_logical_buffer->proto.id());
heap_size_bytes += canonical_logical_buffer->size();
unpadded_heap_size_bytes += canonical_logical_buffer->unpadded_size();
int64_t prior_peak_heap_size_bytes = peak_heap_size_bytes;
peak_heap_size_bytes = std::max(peak_heap_size_bytes, heap_size_bytes);
if (prior_peak_heap_size_bytes != peak_heap_size_bytes) {
peak_heap_size_position = heap_size_bytes_timeline.size() - 1;
peak_unpadded_heap_size_bytes = unpadded_heap_size_bytes;
VLOG(1) << absl::StrFormat("New peak heap size on %d :: %d bytes",
peak_heap_size_position, peak_heap_size_bytes);
peak_logical_buffers = logical_buffers;
}
if (init_buffer_span) {
canonical_logical_buffer->span.emplace(
heap_size_bytes_timeline.size() - 1, simulator_trace_event_size - 1);
}
}
Status DecreaseMemoryUsage(LogicalBufferStruct* canonical_logical_buffer) {
int64_t canonical_buffer_id = canonical_logical_buffer->proto.id();
logical_buffers.remove(canonical_buffer_id);
heap_size_bytes -= canonical_logical_buffer->size();
if (heap_size_bytes < 0) {
return errors::InvalidArgument(absl::StrCat(
"Heap size should be non-negative, but get: ", heap_size_bytes));
}
unpadded_heap_size_bytes -= canonical_logical_buffer->unpadded_size();
if (canonical_logical_buffer->span) {
canonical_logical_buffer->span->second =
heap_size_bytes_timeline.size() - 1;
}
return absl::OkStatus();
}
Status FinalizeMemoryUsage() {
heap_size_bytes_timeline.push_back(heap_size_bytes);
unpadded_heap_size_bytes_timeline.push_back(unpadded_heap_size_bytes);
if (seen_buffer_allocations.size() != 1) {
return errors::InvalidArgument(
absl::StrCat("All heap simulation should work out of a single buffer "
"allocation, actual seen_buffer_allocations.size():",
seen_buffer_allocations.size()));
}
VLOG(1) << "Found " << peak_logical_buffers.size()
<< " logical buffers alive at point of peak heap usage.";
VLOG(1) << "Peak logical buffers: ["
<< absl::StrJoin(peak_logical_buffers, ", ") << "]";
return absl::OkStatus();
}
int64_t heap_size_bytes = 0;
int64_t unpadded_heap_size_bytes = 0;
int64_t peak_heap_size_bytes = 0;
int64_t peak_unpadded_heap_size_bytes = 0;
std::list<int64_t> logical_buffers;
std::list<int64_t> peak_logical_buffers;
std::vector<int64_t> heap_size_bytes_timeline;
std::vector<int64_t> unpadded_heap_size_bytes_timeline;
int64_t peak_heap_size_position = 0;
absl::flat_hash_set<const LogicalBufferStruct*> seen_logical_buffers;
absl::flat_hash_set<const BufferAllocationProto*> seen_buffer_allocations;
const HloProtoBufferWrapper& wrapper;
int64_t simulator_trace_event_size;
};
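// Replays the heap simulator trace selected for `memory_color`, updating
// `stats` on every ALLOC, FREE, and SHARE_WITH event.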
Status ProcessHeapSimulatorTrace(const HloProtoBufferWrapper& wrapper,
const int64_t memory_color,
HeapSimulatorStats* stats) {
int64_t heap_simulator_trace_id =
wrapper.GetHeapSimulatorTraceId(memory_color);
if (heap_simulator_trace_id < 0 ||
heap_simulator_trace_id >= wrapper.GetHloProto()
.buffer_assignment()
.heap_simulator_traces_size()) {
return absl::OkStatus();
}
const auto& trace =
wrapper.GetHloProto().buffer_assignment().heap_simulator_traces(
heap_simulator_trace_id);
stats->SetSimulatorTraceEventSize(trace.events_size());
for (const auto& event : trace.events()) {
stats->UpdateOnSimulatorEvent(event);
LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(event.buffer_id());
if (logical_buffer == nullptr) {
continue;
}
if (event.kind() == HeapSimulatorTrace::Event::ALLOC) {
logical_buffer->inc();
stats->IncreaseMemoryUsage(logical_buffer,
true);
} else if (event.kind() == HeapSimulatorTrace::Event::FREE) {
auto ref_count = logical_buffer->dec();
if (ref_count < 0) {
return errors::InvalidArgument(absl::StrCat(
"Buffer ", logical_buffer->proto.id(), "is freed multiple times."));
}
if (ref_count == 0) {
auto& canonical_buffer = *logical_buffer->get_canonical_buffer();
TF_RETURN_IF_ERROR(stats->DecreaseMemoryUsage(&canonical_buffer));
}
} else if (event.kind() == HeapSimulatorTrace::Event::SHARE_WITH) {
int64_t canonical_buffer_id = event.share_with_canonical_id();
LogicalBufferStruct* canonical_buffer =
wrapper.GetLogicalBuffer(canonical_buffer_id);
if (canonical_buffer == nullptr) {
continue;
}
auto ref_count = logical_buffer->share_with(canonical_buffer);
if (ref_count == 1) {
stats->IncreaseMemoryUsage(canonical_buffer,
false);
}
} else {
return errors::InvalidArgument(
absl::StrCat("Unhandled event kind: ", event.kind()));
}
}
TF_RETURN_IF_ERROR(stats->FinalizeMemoryUsage());
return absl::OkStatus();
}
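// Builds the list of heap objects live at peak memory usage, folding buffers
// smaller than `small_buffer_size` into one aggregate entry.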
struct PeakUsageSnapshot {
PeakUsageSnapshot(const HloProtoBufferWrapper& wrapper,
const HeapSimulatorStats& simulator_stats,
int64_t small_buffer_size)
: wrapper(wrapper),
simulator_stats(simulator_stats),
small_buffer_size(small_buffer_size) {}
void AddHeapObject(const LogicalBufferStruct& logical_buffer) {
if (logical_buffer.size() < small_buffer_size) {
total_small_buffer_size_bytes += logical_buffer.size();
} else {
max_heap_objects.push_back(MakeHeapObject(logical_buffer, colorno++));
}
}
void FinalizeBufferUsage() {
for (const int64_t logical_buffer_id :
simulator_stats.peak_logical_buffers) {
const LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(logical_buffer_id);
if (logical_buffer == nullptr) return;
AddHeapObject(*logical_buffer);
}
if (total_small_buffer_size_bytes != 0) {
max_heap_objects.push_back(MakeHeapObjectCommon(
absl::StrFormat("small (<%d bytes)", small_buffer_size), colorno++,
-1, total_small_buffer_size_bytes,
0));
}
}
std::vector<HeapObject> max_heap_objects;
int64_t indefinite_memory_usage_bytes = 0;
int64_t total_small_buffer_size_bytes = 0;
int32_t colorno = 0;
const HloProtoBufferWrapper& wrapper;
const HeapSimulatorStats& simulator_stats;
const int64_t small_buffer_size;
};
void CreatePeakUsageSnapshot(const HloProtoBufferWrapper& wrapper,
int64_t memory_color,
PeakUsageSnapshot* peak_snapshot) {
for (const auto* logical_buffer :
wrapper.LogicalBuffersWithIndefiniteLifetime(memory_color)) {
const auto& buffer_allocation = logical_buffer->buffer_allocation;
peak_snapshot->indefi | #include "tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils.h"
#include <string>
#include <gmock/gmock.h>
#include "absl/strings/str_format.h"
#include "xla/service/hlo.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/memory_viewer_preprocess.pb.h"
#include "tensorflow/core/util/proto/proto_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
static constexpr char kHLOBase[] = R"pb(
hlo_module {
name: "test_module"
entry_computation_name: "test_computation"
computations {
name: "test_computation"
instructions {
name: "fusion.1"
id: 0
shape { tuple_shapes { element_type: U64 } }
}
instructions {
name: "fusion.2"
id: 1
shape { tuple_shapes { element_type: U64 } }
}
}
}
buffer_assignment {
buffer_allocations {
index: 0
size: 1048576
color: 0
assigned { logical_buffer_id: 1 offset: 0 size: 524288 }
assigned { logical_buffer_id: 2 offset: 524288 size: 524288 }
}
logical_buffers {
id: 1
size: 524288
color: 0
defined_at { instruction_id: 0 shape_index: 0 }
}
logical_buffers {
id: 2
size: 524288
color: 0
defined_at { instruction_id: 1 shape_index: 0 }
}
heap_simulator_traces { %s }
}
)pb";
TEST(MemoryViewerTest, TestHeapSimulatorTraceShareWith_1) {
static constexpr char kHeapSimulatorTrace[] = R"pb(
events { kind: ALLOC buffer_id: 1 }
events { kind: SHARE_WITH buffer_id: 2 share_with_canonical_id: 1 }
events { kind: FREE buffer_id: 1 }
events { kind: FREE buffer_id: 2 }
)pb";
std::string hlo_string = absl::StrFormat(kHLOBase, kHeapSimulatorTrace);
xla::HloProto hlo_proto;
ASSERT_TRUE(
proto_utils::ParseTextFormatFromString(hlo_string, &hlo_proto).ok());
TF_ASSERT_OK_AND_ASSIGN(
PreprocessResult preprocess_result,
ConvertHloProtoToPreprocessResult(hlo_proto, 0));
EXPECT_EQ(preprocess_result.peak_heap_mib(), 0.5);
}
TEST(MemoryViewerTest, TestHeapSimulatorTraceShareWith_2) {
static constexpr char kHeapSimulatorTrace[] = R"pb(
events { kind: ALLOC buffer_id: 1 }
events { kind: FREE buffer_id: 1 }
events { kind: SHARE_WITH buffer_id: 2 share_with_canonical_id: 1 }
events { kind: FREE buffer_id: 2 }
)pb";
std::string hlo_string = absl::StrFormat(kHLOBase, kHeapSimulatorTrace);
xla::HloProto hlo_proto;
ASSERT_TRUE(
proto_utils::ParseTextFormatFromString(hlo_string, &hlo_proto).ok());
TF_ASSERT_OK_AND_ASSIGN(
PreprocessResult preprocess_result,
ConvertHloProtoToPreprocessResult(hlo_proto, 0));
EXPECT_EQ(preprocess_result.peak_heap_mib(), 0.5);
EXPECT_FALSE(preprocess_result.allocation_timeline().empty());
}
}
}
} |
1,465 | cpp | tensorflow/tensorflow | xplane_to_tf_data_stats | tensorflow/core/profiler/convert/xplane_to_tf_data_stats.cc | tensorflow/core/profiler/convert/xplane_to_tf_data_stats_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_TF_DATA_STATS_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_TF_DATA_STATS_H_
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/tf_data_stats.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
TF_CONST_INIT extern const int64_t kSlowCallThresholdPs;
enum class BottleneckType {
kSlowSource,
kSlowDataService,
kSlowRemoteSource,
kSlowTransformationWithParallelVersion,
kSlowTransformationWithoutParallelVersion,
kOther,
};
BottleneckType GetBottleneckType(absl::string_view bottleneck_iterator_name);
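// Builds a CombinedTfDataStats from per-host XPlanes: call Add() once per
// host plane, then Finalize() to fill in the bottleneck analysis, suggestions,
// and summary.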
class CombinedTfDataStatsBuilder {
public:
explicit CombinedTfDataStatsBuilder(
CombinedTfDataStats* combined_tf_data_stats,
bool generate_suggestion = true)
: combined_tf_data_stats_(combined_tf_data_stats),
generate_suggestion_(generate_suggestion) {}
void Add(absl::string_view host_name, XPlane* host_plane);
void Finalize();
private:
CombinedTfDataStats* combined_tf_data_stats_;
bool generate_suggestion_;
};
}
}
#endif
#include "tensorflow/core/profiler/convert/xplane_to_tf_data_stats.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/profiler/protobuf/tf_data_stats.pb.h"
#include "tensorflow/core/profiler/utils/html_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/utils/group_events.h"
#include "tsl/profiler/utils/tf_op_utils.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
#include "tsl/profiler/utils/timespan.h"
namespace tensorflow {
namespace profiler {
const int64_t kSlowCallThresholdPs = 50 * 1000000;
namespace {
bool IsRootIteratorEvent(const XEventVisitor& iterator_event) {
std::vector<absl::string_view> split_result =
absl::StrSplit(iterator_event.Name(), "::");
return split_result.size() == 2;
}
bool IsAsyncIterator(absl::string_view iterator_event_name) {
static auto* kAsyncIterators = new absl::flat_hash_set<absl::string_view>(
{"Prefetch", "ParallelInterleave", "ParallelMap", "ParseExample",
"MapAndBatch", "DataService", "LegacyParallelInterleave",
"ParallelBatch"});
return kAsyncIterators->contains(iterator_event_name);
}
void SetIteratorMetadata(int64_t id, const XEventVisitor& event,
IteratorMetadata* metadata) {
metadata->set_id(id);
auto parent_id_stat = event.GetStat(StatType::kParentId);
if (parent_id_stat.has_value()) {
metadata->set_parent_id(parent_id_stat->IntValue());
}
metadata->set_name(tsl::profiler::IteratorName(event.Name()));
metadata->set_long_name(event.Name().data(), event.Name().size());
metadata->set_is_async(IsAsyncIterator(metadata->name()));
}
std::optional<int64_t> FindDeviceInputPipeline(const XEventVisitor& event) {
if (event.Type() == HostEventType::kDeviceInputPipelineSecondIterator) {
auto parent_id_stat = event.GetStat(StatType::kParentId);
if (parent_id_stat.has_value()) return parent_id_stat->IntValue();
}
return std::nullopt;
}
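// Walks the event forest to record iterator metadata, root iterator events,
// and the ids of device-side input pipelines.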
void ProcessEventForest(
const tsl::profiler::EventForest& event_forest,
absl::flat_hash_set<int64_t>* device_input_pipeline_ids,
absl::flat_hash_map<int64_t, std::vector<const tsl::profiler::EventNode*>>*
root_iterator_event_map,
TfDataStats* tf_data_stats) {
const tsl::profiler::EventNodeMap& event_node_map =
event_forest.GetEventNodeMap();
auto* iterator_event_list =
gtl::FindOrNull(event_node_map, HostEventType::kIterator);
if (!iterator_event_list) return;
for (const tsl::profiler::EventNode& iterator_event : *iterator_event_list) {
const XEventVisitor& iterator_event_visitor =
iterator_event.GetEventVisitor();
auto iterator_id_stat = iterator_event_visitor.GetStat(StatType::kStepId);
if (!iterator_id_stat.has_value()) continue;
int64_t iterator_id = iterator_id_stat->IntValue();
auto result = tf_data_stats->mutable_iterator_metadata()->insert(
{iterator_id, IteratorMetadata()});
IteratorMetadata& metadata = result.first->second;
if (result.second) {
SetIteratorMetadata(iterator_id, iterator_event_visitor, &metadata);
}
if (IsRootIteratorEvent(iterator_event_visitor)) {
(*root_iterator_event_map)[iterator_id].push_back(&iterator_event);
}
}
auto* device_input_pipeline_second_iterator_events = gtl::FindOrNull(
event_node_map, HostEventType::kDeviceInputPipelineSecondIterator);
if (!device_input_pipeline_second_iterator_events) return;
for (const tsl::profiler::EventNode& iterator_event :
*device_input_pipeline_second_iterator_events) {
const XEventVisitor& iterator_event_visitor =
iterator_event.GetEventVisitor();
auto iterator_id_stat = iterator_event_visitor.GetStat(StatType::kStepId);
if (!iterator_id_stat.has_value()) continue;
int64_t iterator_id = iterator_id_stat->IntValue();
auto result = tf_data_stats->mutable_iterator_metadata()->insert(
{iterator_id, IteratorMetadata()});
IteratorMetadata& metadata = result.first->second;
if (result.second) {
SetIteratorMetadata(iterator_id, iterator_event_visitor, &metadata);
std::optional<int64_t> device_input_pipeline_id =
FindDeviceInputPipeline(iterator_event_visitor);
if (device_input_pipeline_id.has_value()) {
device_input_pipeline_ids->insert(*device_input_pipeline_id);
}
}
}
}
void SetInputPipelineMetadata(int64_t id, int64_t name_id,
bool is_device_input_pipeline,
InputPipelineMetadata* metadata) {
constexpr absl::string_view kHostInputPipelinePrefix = "Host:";
constexpr absl::string_view kDeviceInputPipelinePrefix = "Device:";
metadata->set_id(id);
if (is_device_input_pipeline) {
metadata->set_type(InputPipelineMetadata::DEVICE);
metadata->set_name(absl::StrCat(kDeviceInputPipelinePrefix, name_id));
} else {
metadata->set_type(InputPipelineMetadata::HOST);
metadata->set_name(absl::StrCat(kHostInputPipelinePrefix, name_id));
}
}
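// Recursively accumulates per-iterator timing stats for one root iterator
// call, subtracting time spent in child tf.data iterators to compute self
// time; recursion depth is capped at 100 levels.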
void ProcessIteratorEvent(const tsl::profiler::EventNode& iterator_event,
InputPipelineStat* input_pipeline_stat,
bool is_blocking, int level = 0) {
if (level > 100) return;
const XEventVisitor& visitor = iterator_event.GetEventVisitor();
auto iterator_id_stat = visitor.GetStat(StatType::kStepId);
if (!iterator_id_stat.has_value()) return;
int64_t iterator_id = iterator_id_stat->IntValue();
auto result = input_pipeline_stat->mutable_iterator_stats()->insert(
{iterator_id, IteratorStat()});
IteratorStat& iterator_stat = result.first->second;
if (result.second) {
iterator_stat.set_id(iterator_id);
iterator_stat.set_start_time_ps(visitor.TimestampPs());
}
iterator_stat.set_duration_ps(iterator_stat.duration_ps() +
visitor.DurationPs());
int64_t self_time_ps = visitor.DurationPs();
tsl::profiler::Timespan self_time_span = visitor.GetTimespan();
for (const tsl::profiler::EventNode* child : iterator_event.GetChildren()) {
const XEventVisitor& child_visitor = child->GetEventVisitor();
if (tsl::profiler::ParseTfOpFullname(child_visitor.Name()).category ==
tsl::profiler::Category::kTfData) {
int64_t overlap_duration_ps =
self_time_span.OverlappedDurationPs(child_visitor.GetTimespan());
ProcessIteratorEvent(*child, input_pipeline_stat,
is_blocking && overlap_duration_ps, level + 1);
self_time_ps -= overlap_duration_ps;
}
}
iterator_stat.set_self_time_ps(iterator_stat.self_time_ps() + self_time_ps);
iterator_stat.set_is_blocking(iterator_stat.is_blocking() || is_blocking);
iterator_stat.set_num_calls(iterator_stat.num_calls() + 1);
}
void SetBottleneckIteratorId(InputPipelineStat* input_pipeline_stat) {
int64_t bottleneck_iterator_id = 0;
int64_t max_self_time = 0;
for (const auto& pair : input_pipeline_stat->iterator_stats()) {
const auto& id = pair.first;
const auto& iterator_stat = pair.second;
if (iterator_stat.is_blocking() &&
iterator_stat.self_time_ps() > max_self_time) {
bottleneck_iterator_id = id;
max_self_time = iterator_stat.self_time_ps();
}
}
input_pipeline_stat->set_bottleneck_iterator_id(bottleneck_iterator_id);
input_pipeline_stat->set_bottleneck_iterator_latency_ps(max_self_time);
}
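// Aggregates root iterator events into per-input-pipeline statistics:
// min/avg/max latency, slow-call count, and the bottleneck iterator of each
// call.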
void ProcessInputPipelines(
const absl::flat_hash_set<int64_t>& device_input_pipeline_ids,
absl::flat_hash_map<int64_t, std::vector<const tsl::profiler::EventNode*>>*
root_iterator_event_map,
TfDataStats* tf_data_stats) {
auto* input_pipelines = tf_data_stats->mutable_input_pipelines();
int64_t num_host_input_pipelines = 0;
int64_t num_device_input_pipelines = 0;
for (auto& id_and_events : *root_iterator_event_map) {
auto& root_iterator_id = id_and_events.first;
auto& root_iterator_events = id_and_events.second;
absl::c_sort(root_iterator_events, [](const tsl::profiler::EventNode* lhs,
const tsl::profiler::EventNode* rhs) {
return lhs->GetEventVisitor().DurationPs() >
rhs->GetEventVisitor().DurationPs();
});
auto result =
input_pipelines->insert({root_iterator_id, InputPipelineStats()});
InputPipelineStats& input_pipeline_stats = result.first->second;
InputPipelineMetadata* metadata = input_pipeline_stats.mutable_metadata();
if (result.second) {
bool is_device_input_pipeline =
device_input_pipeline_ids.contains(root_iterator_id);
int64_t name_id = is_device_input_pipeline ? num_device_input_pipelines++
: num_host_input_pipelines++;
SetInputPipelineMetadata(root_iterator_id, name_id,
is_device_input_pipeline, metadata);
}
int64_t sum_latency_ps = 0;
int64_t min_latency_ps = INT64_MAX;
int64_t max_latency_ps = 0;
int64_t num_slow_calls = 0;
for (const tsl::profiler::EventNode* root_iterator_event :
root_iterator_events) {
InputPipelineStat* stat = input_pipeline_stats.add_stats();
ProcessIteratorEvent(*root_iterator_event, stat,
true);
SetBottleneckIteratorId(stat);
int64_t latency_ps = root_iterator_event->GetEventVisitor().DurationPs();
sum_latency_ps += latency_ps;
min_latency_ps = std::min(min_latency_ps, latency_ps);
max_latency_ps = std::max(max_latency_ps, latency_ps);
if (latency_ps > kSlowCallThresholdPs) num_slow_calls++;
}
input_pipeline_stats.set_avg_latency_ps(sum_latency_ps /
root_iterator_events.size());
input_pipeline_stats.set_min_latency_ps(min_latency_ps);
input_pipeline_stats.set_max_latency_ps(max_latency_ps);
input_pipeline_stats.set_num_slow_calls(num_slow_calls);
}
}
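// Collects the slowest host input pipelines across all hosts, sorted by
// descending max latency, and records each pipeline's bottleneck iterator.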
void SetBottleneckAnalysis(CombinedTfDataStats* combined_tf_data_stats) {
struct InputPipeline {
InputPipeline(absl::string_view host_name,
absl::string_view input_pipeline_name, int64_t max_latency_ps,
absl::string_view iterator_name,
absl::string_view iterator_long_name,
int64_t iterator_latency_ps)
: host_name(host_name),
input_pipeline_name(input_pipeline_name),
max_latency_ps(max_latency_ps),
iterator_name(iterator_name),
iterator_long_name(iterator_long_name),
iterator_latency_ps(iterator_latency_ps) {}
absl::string_view host_name;
absl::string_view input_pipeline_name;
int64_t max_latency_ps;
absl::string_view iterator_name;
absl::string_view iterator_long_name;
int64_t iterator_latency_ps;
bool operator<(const InputPipeline& rhs) const {
return max_latency_ps > rhs.max_latency_ps;
}
};
std::vector<InputPipeline> slow_input_pipelines;
for (const auto& host_name_and_tf_data_stats :
combined_tf_data_stats->tf_data_stats()) {
absl::string_view host_name = host_name_and_tf_data_stats.first;
const TfDataStats& tf_data_stats = host_name_and_tf_data_stats.second;
for (const auto& id_and_stats : tf_data_stats.input_pipelines()) {
const InputPipelineStats& input_pipeline_stats = id_and_stats.second;
if (input_pipeline_stats.metadata().type() ==
InputPipelineMetadata::DEVICE) {
continue;
}
const InputPipelineStat& input_pipeline_stat =
input_pipeline_stats.stats(0);
const IteratorMetadata& metadata = tf_data_stats.iterator_metadata().at(
input_pipeline_stat.bottleneck_iterator_id());
slow_input_pipelines.emplace_back(
host_name, input_pipeline_stats.metadata().name(),
input_pipeline_stats.max_latency_ps(), metadata.name(),
metadata.long_name(),
input_pipeline_stat.bottleneck_iterator_latency_ps());
}
}
std::sort(slow_input_pipelines.begin(), slow_input_pipelines.end());
for (const auto& input_pipeline : slow_input_pipelines) {
TfDataBottleneckAnalysis* bottleneck_analysis =
combined_tf_data_stats->add_bottleneck_analysis();
bottleneck_analysis->set_host(input_pipeline.host_name.data(),
input_pipeline.host_name.size());
bottleneck_analysis->set_input_pipeline(
input_pipeline.input_pipeline_name.data(),
input_pipeline.input_pipeline_name.size());
bottleneck_analysis->set_max_latency_ps(input_pipeline.max_latency_ps);
bottleneck_analysis->set_iterator_name(input_pipeline.iterator_name.data(),
input_pipeline.iterator_name.size());
bottleneck_analysis->set_iterator_long_name(
input_pipeline.iterator_long_name.data(),
input_pipeline.iterator_long_name.size());
bottleneck_analysis->set_iterator_latency_ps(
input_pipeline.iterator_latency_ps);
}
}
std::string GetSuggestion(BottleneckType type) {
constexpr absl::string_view kPlaybookLink =
"https:
constexpr absl::string_view kPlaybookSourceDatasetLink =
"https:
"data_performance_analysis#source_datasets";
constexpr absl::string_view kPlaybookCpuUtilizationLink =
"https:
"data_performance_analysis#3_are_you_reaching_high_cpu_utilization";
constexpr absl::string_view kPlaybookTransformationLink =
"https:
"data_performance_analysis#transformation_datasets";
constexpr absl::string_view kTfGuideParallelDataExtractionLink =
"https:
"data_performance#parallelizing_data_extraction";
constexpr absl::string_view kTfGuideParallelTransformationLink =
"https:
"data_performance#parallelizing_data_transformation";
constexpr absl::string_view kTfGuideCacheLink =
"https:
constexpr absl::string_view kTfDataServiceLink =
"https:
"service?version=nightly";
switch (type) {
case BottleneckType::kSlowSource:
return absl::StrFormat(
"1. Check the locality of a host and input data. Ideally, they "
"should be in the same cell (or very close, like the same "
"region).<br/>"
"2. Parallelize reading from this dataset source. See %s and %s for "
"more details.<br/>",
AnchorElement(kPlaybookSourceDatasetLink, "here"),
AnchorElement(kTfGuideParallelDataExtractionLink, "here"));
case BottleneckType::kSlowDataService:
return absl::StrFormat(
"1. Fetching data from tf.data service took a while. Profile the "
"tf.data service worker to analyze the issue further.<br/>"
"2. See %s for more details on tf.data service.<br/>"
"3. See %s for other suggestions.",
AnchorElement(kTfDataServiceLink, "this"),
AnchorElement(kPlaybookLink, "this"));
case BottleneckType::kSlowRemoteSource:
return absl::StrFormat(
"1. The remote data source is slow. Profile its host to analyze the "
"issue further.<br/>"
"2. See %s for other suggestions.",
AnchorElement(kPlaybookLink, "this"));
case BottleneckType::kSlowTransformationWithParallelVersion:
return absl::StrFormat(
"1. Parallelize this transformation by setting "
"<code>num_parallel_calls=tf.data.experimental.AUTOTUNE</code>. See "
"%s for more details.<br/>"
"2. Consider adding <code>cache</code> after this transformation if "
"your data fits into memory and it is appropriate (e.g., there is no "
"randomness in upstream transformations like <code>shuffle</code>). "
"See %s for more details.<br/>"
"3. Find more resources %s.",
AnchorElement(kTfGuideParallelTransformationLink, "this"),
AnchorElement(kTfGuideCacheLink, "this"),
AnchorElement(kPlaybookTransformationLink, "here"));
case BottleneckType::kSlowTransformationWithoutParallelVersion:
return absl::StrFormat(
"1. This transformation is inherently sequential. Add outer "
"parallelism by running multiple copies of the input pipeline over "
"sharded inputs and combining the results. See %s for more "
"details.<br/>"
"2. Consider adding <code>cache</code> after this transformation if "
"your data fits into memory and it is appropriate (e.g., there is no "
"randomness in upstream transformations like <code>shuffle</code>). "
"See %s for more details.<br/>"
"3. Find more resources %s.",
AnchorElement(kPlaybookTransformationLink, "this"),
AnchorElement(kTfGuideCacheLink, "this"),
AnchorElement(kPlaybookCpuUtilizationLink, "here"));
default:
return absl::StrFormat("See %s for suggestions.",
AnchorElement(kPlaybookLink, "this"));
}
}
void SetSuggestion(CombinedTfDataStats* combined_tf_data_stats) {
for (TfDataBottleneckAnalysis& bottleneck_analysis :
*combined_tf_data_stats->mutable_bottleneck_analysis()) {
bottleneck_analysis.set_suggestion(
GetSuggestion(GetBottleneckType(bottleneck_analysis.iterator_name())));
}
}
void SetSummary(CombinedTfDataStats* combined_tf_data_stats) {
int64_t max_latency_ps = 0;
if (combined_tf_data_stats->bottleneck_analysis_size()) {
max_latency_ps =
combined_tf_data_stats->bottleneck_analysis().at(0).max_latency_ps();
}
if (max_latency_ps > kSlowCallThresholdPs) {
combined_tf_data_stats->set_is_input_bound(true);
combined_tf_data_stats->set_summary(
"Your profile has a tf.data input pipeline slower than 50 us. For each "
"slow input pipeline, below shows a bottleneck in the input pipeline "
"and a suggestion on how to fix it.");
} else if (max_latency_ps > 0) {
combined_tf_data_stats->set_is_input_bound(false);
combined_tf_data_stats->set_summary(
"Your profile does not have any tf.data input pipeline slower than 50 "
"us. Your job could be still input bound if this profile didn't "
"capture all workers.");
} else {
combined_tf_data_stats->set_is_input_bound(false);
combined_tf_data_stats->set_summary(
"No tf.data activity captured in your profile. If your job uses "
"tf.data, try to capture a longer profile.");
}
}
}
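// Maps a bottleneck iterator name to a coarse bottleneck category used to
// select a suggestion message.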
BottleneckType GetBottleneckType(absl::string_view bottleneck_iterator_name) {
static auto* kBottleneckTypeMap = new absl::flat_hash_map<absl::string_view,
BottleneckType>(
{
{"TFRecord", BottleneckType::kSlowSource},
{"SSTable", BottleneckType::kSlowSource},
{"RecordIO", BottleneckType::kSlowSource},
{"Spanner", BottleneckType::kSlowSource},
{"TFColumn", BottleneckType::kSlowSource},
{"SleepwalkRemoteDataset", BottleneckType::kSlowSource},
{"TextLine", BottleneckType::kSlowSource},
{"StitchedTimelineDataset", BottleneckType::kSlowSource},
{"DateKeyDataset", BottleneckType::kSlowSource},
{"CapacitorProto", BottleneckType::kSlowSource},
{"LMDB", BottleneckType::kSlowSource},
{"ExternalDataset", BottleneckType::kSlowSource},
{"PearModel", BottleneckType::kSlowSource},
{"FixedLengthRecordV2", BottleneckType::kSlowSource},
{"FromTensor", BottleneckType::kSlowSource},
{"TensorSlice", BottleneckType::kSlowSource},
{"Generator", BottleneckType::kSlowSource},
{"SyntheticDatasetOp", BottleneckType::kSlowSource},
{"DataService", BottleneckType::kSlowDataService},
{"GuzzlerDataGuzzlerRemoteDataset", BottleneckType::kSlowRemoteSource},
{"ReverbDataset", BottleneckType::kSlowRemoteSource},
{"DatasetSampleGame", BottleneckType::kSlowRemoteSource},
{"Courier", BottleneckType::kSlowRemoteSource},
{"ReverbEpisodeDataset", BottleneckType::kSlowRemoteSource},
{"Map", BottleneckType::kSlowTransformationWithParallelVersion},
{"Interleave", BottleneckType::kSlowTransformationWithParallelVersion},
{"Filter", BottleneckType::kSlowTransformationWithoutParallelVersion},
{"Batch", BottleneckType::kSlowTransformationWithoutParallelVersion},
{"Unbatch", BottleneckType::kSlowTransformationWithoutParallelVersion}});
if (auto type =
gtl::FindOrNull(*kBottleneckTypeMap, bottleneck_iterator_name)) {
return *type;
}
return BottleneckType::kOther;
}
void CombinedTfDataStatsBuilder::Add(absl::string_view host_name,
XPlane* host_plane) {
TfDataStats& tf_data_stats =
(*combined_tf_data_stats_
->mutable_tf_data_stats())[std::string(host_name)];
tsl::profiler::EventForest event_forest;
event_forest.AddPlanes(tsl::profiler::CreateTfXPlaneVisitor, {host_plane});
event_forest.ConnectEvents();
event_forest.ConnectTfDataEvents();
absl::flat_hash_set<int64_t> device_input_pipeline_ids;
absl::flat_hash_map<int64_t, std::vector<const tsl::profiler::EventNode*>>
root_iterator_event_map;
ProcessEventForest(event_forest, &device_input_pipeline_ids,
&root_iterator_event_map, &tf_data_stats);
ProcessInputPipelines(device_input_pipeline_ids, &root_iterator_event_map,
&tf_data_stats);
}
void CombinedTfDataStatsBuilder::Finalize() {
SetBottleneckAnalysis(combined_tf_data_stats_);
if (generate_suggestion_) SetSuggestion(combined_tf_data_stats_);
SetSummary(combined_tf_data_stats_);
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_tf_data_stats.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/tf_data_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::EqualsProto;
TEST(XPlaneToTfDataStatsTest, HostInputPipeline) {
constexpr int64_t kPrefetchIteratorId = 123;
constexpr int64_t kRangeIteratorId = 456;
constexpr int64_t kFirstElementId = 100;
constexpr int64_t kSecondElementId = 200;
XPlane host_plane;
XPlaneBuilder host_plane_builder(&host_plane);
host_plane_builder.ReserveLines(2);
auto consumer_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::Prefetch", 0,
100000000, {{StatType::kStepId, kPrefetchIteratorId}});
CreateXEvent(&host_plane_builder, &consumer_thread,
HostEventType::kPrefetchConsume, 80000000, 20000000,
{{StatType::kElementId, kFirstElementId}});
CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::Prefetch",
200000000, 20000000, {{StatType::kStepId, kPrefetchIteratorId}});
CreateXEvent(&host_plane_builder, &consumer_thread,
HostEventType::kPrefetchConsume, 210000000, 10000000,
{{StatType::kElementId, kSecondElementId}});
auto producer_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &producer_thread,
HostEventType::kPrefetchProduce, 0, 80000000,
{{StatType::kElementId, kFirstElementId}});
CreateXEvent(&host_plane_builder, &producer_thread,
"Iterator::Prefetch::Range", 0, 80000000,
{{StatType::kStepId, kRangeIteratorId},
{StatType::kParentId, kPrefetchIteratorId}});
CreateXEvent(&host_plane_builder, &producer_thread,
HostEventType::kPrefetchProduce, 100000000, 80000000,
{{StatType::kElementId, kSecondElementId}});
CreateXEvent(&host_plane_builder, &producer_thread,
"Iterator::Prefetch::Range", 100000000, 80000000,
{{StatType::kStepId, kRangeIteratorId},
{StatType::kParentId, kPrefetchIteratorId}});
CombinedTfDataStats combined_tf_data_stats;
CombinedTfDataStatsBuilder builder(&combined_tf_data_stats);
builder.Add("host1", &host_plane);
builder.Finalize();
EXPECT_THAT(
combined_tf_data_stats, EqualsProto(R"pb(
bottleneck_analysis: {
host: "host1"
input_pipeline: "Host:0"
max_latency_ps: 100000000
iterator_name: "Range"
iterator_long_name: "Iterator::Prefetch::Range"
iterator_latency_ps: 80000000
suggestion: "See <a href=\"https:
}
tf_data_stats: {
key: "host1"
value: {
iterator_metadata: {
key: 123,
value: {
id: 123
name: "Prefetch"
long_name: "Iterator::Prefetch"
is_async: true
}
}
iterator_metadata: {
key: 456,
value: {
id: 456
parent_id: 123
name: "Range"
long_name: "Iterator::Prefetch::Range"
is_async: false
}
}
input_pipelines {
key: 123,
value: {
metadata { id: 123 type: HOST name: "Host:0" }
avg_latency_ps: 60000000
min_latency_ps: 20000000
max_latency_ps: 100000000
num_slow_calls: 1
stats {
bottleneck_iterator_id: 456
bottleneck_iterator_latency_ps: 80000000
iterator_stats {
key: 123,
value: {
id: 123
start_time_ps: 0
duration_ps: 100000000
self_time_ps: 20000000
is_blocking: true
num_calls: 1
}
}
iterator_stats {
key: 456,
value: {
id: 456
start_time_ps: 0
duration_ps: 80000000
self_time_ps: 80000000
is_blocking: true
num_calls: 1
}
}
}
stats {
bottleneck_iterator_id: 123
bottleneck_iterator_latency_ps: 20000000
iterator_stats {
key: 123,
value: {
id: 123
start_time_ps: 200000000
duration_ps: 20000000
self_time_ps: 20000000
is_blocking: true
num_calls: 1
}
}
iterator_stats {
key: 456,
value: {
id: 456
start_time_ps: 100000000
duration_ps: 80000000
self_time_ps: 80000000
is_blocking: false
num_calls: 1
}
}
}
}
}
}
}
is_input_bound: true
summary: "Your profile has a tf.data input pipeline slower than 50 us. For each slow input pipeline, below shows a bottleneck in the input pipeline and a suggestion on how to fix it."
)pb"));
}
TEST(XPlaneToTfDataStatsTest, DeviceInputPipeline) {
constexpr int64_t kPrefetchIteratorId = 123;
constexpr int64_t kRangeIteratorId = 456;
constexpr int64_t kElementId = 100;
XPlane host_plane;
XPlaneBuilder host_plane_builder(&host_plane);
host_plane_builder.ReserveLines(2);
auto consumer_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::Prefetch", 0,
30000000, {{StatType::kStepId, kPrefetchIteratorId}});
CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::Prefetch",
100000000, 100000000,
{{StatType::kStepId, kPrefetchIteratorId}});
CreateXEvent(&host_plane_builder, &consumer_thread,
HostEventType::kPrefetchConsume, 180000000, 20000000,
{{StatType::kElementId, kElementId}});
auto producer_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &producer_thread,
HostEventType::kPrefetchProduce, 100000000, 80000000,
{{StatType::kElementId, kElementId}});
CreateXEvent(&host_plane_builder, &producer_thread,
"Iterator::Prefetch::Generator", 100000000, 80000000,
{{StatType::kStepId, kRangeIteratorId},
{StatType::kParentId, kPrefetchIteratorId}});
CombinedTfDataStats combined_tf_data_stats;
CombinedTfDataStatsBuilder builder(&combined_tf_data_stats);
builder.Add("host1", &host_plane);
builder.Finalize();
EXPECT_THAT(
combined_tf_data_stats, EqualsProto(R"pb(
tf_data_stats: {
key: "host1"
value: {
iterator_metadata: {
key: 123,
value: {
id: 123
name: "Prefetch"
long_name: "Iterator::Prefetch"
is_async: true
}
}
iterator_metadata: {
key: 456,
value: {
id: 456
parent_id: 123
name: "Generator"
long_name: "Iterator::Prefetch::Generator"
is_async: false
}
}
input_pipelines {
key: 123,
value: {
metadata { id: 123 type: DEVICE name: "Device:0" }
avg_latency_ps: 65000000
min_latency_ps: 30000000
max_latency_ps: 100000000
num_slow_calls: 1
stats {
bottleneck_iterator_id: 456
bottleneck_iterator_latency_ps: 80000000
iterator_stats {
key: 123,
value: {
id: 123
start_time_ps: 100000000
duration_ps: 100000000
self_time_ps: 20000000
is_blocking: true
num_calls: 1
}
}
iterator_stats {
key: 456,
value: {
id: 456
start_time_ps: 100000000
duration_ps: 80000000
self_time_ps: 80000000
is_blocking: true
num_calls: 1
}
}
}
stats {
bottleneck_iterator_id: 123
bottleneck_iterator_latency_ps: 30000000
iterator_stats {
key: 123,
value: {
id: 123
start_time_ps: 0
duration_ps: 30000000
self_time_ps: 30000000
is_blocking: true
num_calls: 1
}
}
}
}
}
}
}
summary: "No tf.data activity captured in your profile. If your job uses tf.data, try to capture a longer profile."
)pb"));
}
TEST(XPlaneToTfDataStatsTest, MapAndBatch) {
constexpr int64_t kMapAndBatchIteratorId = 123;
constexpr int64_t kRangeIteratorId = 456;
constexpr int64_t kElementId = 100;
XPlane host_plane;
XPlaneBuilder host_plane_builder(&host_plane);
host_plane_builder.ReserveLines(2);
XLineBuilder consumer_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::MapAndBatch",
0, 100000000, {{StatType::kStepId, kMapAndBatchIteratorId}});
CreateXEvent(&host_plane_builder, &consumer_thread,
HostEventType::kMapAndBatchConsume, 80000000, 20000000,
{{StatType::kElementId, kElementId}});
XLineBuilder producer_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &producer_thread,
HostEventType::kMapAndBatchProduce, 0, 30000000,
{{StatType::kElementId, kElementId}});
CreateXEvent(&host_plane_builder, &producer_thread,
"Iterator::MapAndBatch::Range", 0, 30000000,
{{StatType::kStepId, kRangeIteratorId},
{StatType::kParentId, kMapAndBatchIteratorId}});
CreateXEvent(&host_plane_builder, &producer_thread,
HostEventType::kMapAndBatchProduce, 40000000, 30000000,
{{StatType::kElementId, kElementId}});
CreateXEvent(&host_plane_builder, &producer_thread,
"Iterator::MapAndBatch::Range", 40000000, 30000000,
{{StatType::kStepId, kRangeIteratorId},
{StatType::kParentId, kMapAndBatchIteratorId}});
CombinedTfDataStats combined_tf_data_stats;
CombinedTfDataStatsBuilder builder(&combined_tf_data_stats);
builder.Add("host1", &host_plane);
builder.Finalize();
EXPECT_THAT(
combined_tf_data_stats, EqualsProto(R"pb(
bottleneck_analysis: {
host: "host1"
input_pipeline: "Host:0"
max_latency_ps: 100000000
iterator_name: "Range"
iterator_long_name: "Iterator::MapAndBatch::Range"
iterator_latency_ps: 60000000
suggestion: "See <a href=\"https:
}
tf_data_stats: {
key: "host1"
value: {
iterator_metadata: {
key: 123,
value: {
id: 123
name: "MapAndBatch"
long_name: "Iterator::MapAndBatch"
is_async: true
}
}
iterator_metadata: {
key: 456,
value: {
id: 456
parent_id: 123
name: "Range"
long_name: "Iterator::MapAndBatch::Range"
is_async: false
}
}
input_pipelines {
key: 123,
value: {
metadata { id: 123 type: HOST name: "Host:0" }
avg_latency_ps: 100000000
min_latency_ps: 100000000
max_latency_ps: 100000000
num_slow_calls: 1
stats {
bottleneck_iterator_id: 456
bottleneck_iterator_latency_ps: 60000000
iterator_stats {
key: 123,
value: {
id: 123
start_time_ps: 0
duration_ps: 100000000
self_time_ps: 40000000
is_blocking: true
num_calls: 1
}
}
iterator_stats {
key: 456,
value: {
id: 456
start_time_ps: 0
duration_ps: 60000000
self_time_ps: 60000000
is_blocking: true
num_calls: 2
}
}
}
}
}
}
}
is_input_bound: true
summary: "Your profile has a tf.data input pipeline slower than 50 us. For each slow input pipeline, below shows a bottleneck in the input pipeline and a suggestion on how to fix it."
)pb"));
}
}
}
} |
1,466 | cpp | tensorflow/tensorflow | op_stats_combiner | tensorflow/core/profiler/convert/op_stats_combiner.cc | tensorflow/core/profiler/convert/op_stats_combiner_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_COMBINER_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_COMBINER_H_
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/profiler/convert/op_metrics_db_combiner.h"
#include "tensorflow/core/profiler/protobuf/hardware_types.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/utils/step_intersection.h"
namespace tensorflow {
namespace profiler {
bool IsCoordinator(bool no_accelerator_in_system, HardwareType hardware_type);
uint32 GlobalCoreId(int host_id, uint32 device_ordinal);
template <typename CoreIdMap>
void CombineCoreIdMap(int src_host_id, const CoreIdMap& src, CoreIdMap* dst) {
for (const auto& core_id_and_value : src) {
uint32 global_core_id = GlobalCoreId(src_host_id, core_id_and_value.first);
auto iter_and_inserted =
dst->insert({global_core_id, core_id_and_value.second});
DCHECK(iter_and_inserted.second)
<< "Duplicated core_id: " << iter_and_inserted.first->first;
}
}
struct OpStatsInfo {
OpStatsInfo(const OpStats* op_stats, HardwareType hardware_type,
int src_host_id)
: op_stats(op_stats),
hardware_type(hardware_type),
src_host_id(src_host_id) {}
const OpStats* op_stats;
HardwareType hardware_type;
int src_host_id;
};
bool NoAcceleratorInSystem(const std::vector<OpStatsInfo>& all_op_stats_info);
StepIntersection ComputeStepIntersectionToMergeOpStats(
const std::vector<OpStatsInfo>& all_op_stats_info,
uint32 max_step_per_host);
void CombineAllOpStats(const std::vector<OpStatsInfo>& all_op_stats_info,
const StepIntersection& step_intersection,
OpStats* combined_op_stats);
}
}
#endif
#include "tensorflow/core/profiler/convert/op_stats_combiner.h"
#include <algorithm>
#include <cstddef>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/profiler/convert/op_metrics_db_combiner.h"
#include "tensorflow/core/profiler/convert/xplane_to_tf_functions.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/hardware_types.pb.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/power_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/topology.pb.h"
#include "tensorflow/core/profiler/utils/hardware_type_utils.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/step_intersection.h"
namespace tensorflow {
namespace profiler {
namespace {
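// Merges one host's per-core step info into the combined step info,
// remapping core ids to global ids and folding the HLO metrics databases
// into the combiners.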
void CombinePerCoreStepInfo(
int src_host_id, const PerCoreStepInfo& src, bool use_incomplete_step,
PerCoreStepInfo* dst,
OpMetricsDbCombiner* hlo_metrics_db_complete_steps_only_combiner,
OpMetricsDbCombiner* hlo_metrics_db_per_step_combiner) {
CombineCoreIdMap(src_host_id, src.step_info_per_core(),
dst->mutable_step_info_per_core());
uint32 new_step_num = dst->step_num();
for (auto& percore_stepinfo : *dst->mutable_step_info_per_core()) {
auto& stepinfo = percore_stepinfo.second;
stepinfo.set_step_num(new_step_num);
}
if (!use_incomplete_step) {
hlo_metrics_db_complete_steps_only_combiner->Combine(src.hlo_metrics_db());
}
hlo_metrics_db_per_step_combiner->Combine(src.hlo_metrics_db());
CombineCoreIdMap(src_host_id, src.all_reduce_db_per_core(),
dst->mutable_all_reduce_db_per_core());
CombineCoreIdMap(src_host_id, src.core_id_to_replica_id_map(),
dst->mutable_core_id_to_replica_id_map());
}
void CombineStepDatabase(
int src_host_id, const StepIntersection& step_intersection,
const StepDatabaseResult& src, StepDatabaseResult* dst,
OpMetricsDbCombiner* hlo_metrics_db_complete_steps_only_combiner,
std::vector<OpMetricsDbCombiner>* hlo_metrics_db_per_step_combiners) {
if (src.use_incomplete_step()) dst->set_use_incomplete_step(true);
uint32 src_first_step_idx = step_intersection.FirstStepIndex(src_host_id);
for (uint32 i = 0; i < step_intersection.NumSteps(); i++) {
CombinePerCoreStepInfo(
src_host_id, src.step_sequence(src_first_step_idx + i),
src.use_incomplete_step(), dst->mutable_step_sequence(i),
hlo_metrics_db_complete_steps_only_combiner,
&(*hlo_metrics_db_per_step_combiners)[i]);
}
}
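// Per power component: keep the maximum power across hosts and average the
// average power, weighted by how many hosts each side represents.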
void CombinePowerMetrics(const RunEnvironment& src, RunEnvironment* dst) {
const size_t src_hosts = src.hostnames_size();
const size_t dst_hosts = dst->hostnames_size();
const double src_weight = src_hosts * 1.0 / (src_hosts + dst_hosts);
const double dst_weight = dst_hosts * 1.0 / (src_hosts + dst_hosts);
for (const auto& src_metric : src.power_metrics().power_component_metrics()) {
for (auto& dst_metric :
*dst->mutable_power_metrics()->mutable_power_component_metrics()) {
if (src_metric.component_name() != dst_metric.component_name()) continue;
dst_metric.set_max_power(
std::max(src_metric.max_power(), dst_metric.max_power()));
dst_metric.set_avg_power(src_metric.avg_power() * src_weight +
dst_metric.avg_power() * dst_weight);
}
}
}
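// Unions hostnames, prefers an accelerator device type over the "CPU"/"Device"
// placeholders, sums device core and task counts, takes the max replica
// configuration, and carries over job info, trace level and the training flag.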
void CombineRunEnvironment(const RunEnvironment& src, RunEnvironment* dst) {
dst->mutable_hostnames()->insert(src.hostnames().begin(),
src.hostnames().end());
dst->set_host_count(dst->hostnames_size());
if (src.device_type() != "CPU" && src.device_type() != "Device") {
dst->set_device_type(src.device_type());
dst->set_device_core_count(src.device_core_count() +
dst->device_core_count());
dst->set_replica_count(std::max(src.replica_count(), dst->replica_count()));
dst->set_num_cores_per_replica(
std::max(src.num_cores_per_replica(), dst->num_cores_per_replica()));
*dst->mutable_system_topology() = src.system_topology();
} else if (dst->device_type().empty()) {
dst->set_device_type(src.device_type());
}
dst->set_task_count(src.task_count() + dst->task_count());
if (src.host_independent_job_info().profile_duration_ms() > 0) {
(*dst->mutable_host_independent_job_info()) =
src.host_independent_job_info();
}
for (const auto& job_info : src.host_dependent_job_info()) {
*(dst->add_host_dependent_job_info()) = job_info;
}
dst->set_host_trace_level(src.host_trace_level());
dst->set_is_training(src.is_training());
CombinePowerMetrics(src, dst);
}
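// Peak FLOPS and ridge point come from the source; the peak-bandwidth list is
// only copied when the destination does not have one yet.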
void CombinePerfEnv(const PerfEnv& src, PerfEnv* dst) {
dst->set_peak_tera_flops_per_second(src.peak_tera_flops_per_second());
if (src.peak_bws_giga_bytes_per_second_size() > 0 &&
dst->peak_bws_giga_bytes_per_second_size() == 0) {
*dst->mutable_peak_bws_giga_bytes_per_second() =
src.peak_bws_giga_bytes_per_second();
}
dst->set_ridge_point(src.ridge_point());
}
void CombineDiagnostics(const Diagnostics& src, Diagnostics* dst) {
dst->mutable_info()->MergeFrom(src.info());
dst->mutable_warnings()->MergeFrom(src.warnings());
dst->mutable_errors()->MergeFrom(src.errors());
}
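// Merges a single host's OpStats into the combined result: op metrics DBs, the
// step database (skipped for coordinator-only hosts), run/perf environment,
// diagnostics, kernel reports, tf-function DB and core details. Matrix-unit
// utilization is summed here and averaged over hosts in CombineAllOpStats.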
void CombineOpStats(
bool no_accelerator_in_system, int src_host_id, HardwareType hardware_type,
const StepIntersection& step_intersection, const OpStats& src, OpStats* dst,
OpMetricsDbCombiner* host_op_metrics_db_combiner,
OpMetricsDbCombiner* device_op_metrics_db_combiner,
OpMetricsDbCombiner* hlo_metrics_db_complete_steps_only_combiner,
std::vector<OpMetricsDbCombiner>* hlo_metrics_db_per_step_combiners) {
host_op_metrics_db_combiner->Combine(src.host_op_metrics_db(),
                                       /*update_num_cores=*/false);
device_op_metrics_db_combiner->Combine(src.device_op_metrics_db());
if (!IsCoordinator(no_accelerator_in_system, hardware_type)) {
CombineStepDatabase(src_host_id, step_intersection, src.step_db(),
dst->mutable_step_db(),
hlo_metrics_db_complete_steps_only_combiner,
hlo_metrics_db_per_step_combiners);
}
CombineRunEnvironment(src.run_environment(), dst->mutable_run_environment());
CombinePerfEnv(src.perf_env(), dst->mutable_perf_env());
CombineDiagnostics(src.diagnostics(), dst->mutable_diagnostics());
dst->mutable_kernel_stats_db()->mutable_reports()->MergeFrom(
src.kernel_stats_db().reports());
CombineTfFunctionDb(src.tf_function_db(), dst->mutable_tf_function_db());
CombineCoreIdMap(src_host_id, src.core_id_to_details(),
dst->mutable_core_id_to_details());
dst->mutable_performance_counter_result()
->set_matrix_unit_utilization_percent(
dst->performance_counter_result().matrix_unit_utilization_percent() +
src.performance_counter_result().matrix_unit_utilization_percent());
}
}
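// A host counts as a coordinator when it has no accelerator itself but some
// other host in the same profile does.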
bool IsCoordinator(bool no_accelerator_in_system, HardwareType hardware_type) {
return !HasDevice(hardware_type) && !no_accelerator_in_system;
}
bool NoAcceleratorInSystem(const std::vector<OpStatsInfo>& all_op_stats_info) {
for (const auto& op_stats_info : all_op_stats_info) {
if (HasDevice(op_stats_info.hardware_type)) {
return false;
}
}
return true;
}
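// Packs (host_id, device_ordinal) into a single id, assuming fewer than
// kMaxDevicesPerHost devices per host.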
uint32 GlobalCoreId(int host_id, uint32 device_ordinal) {
constexpr uint32 kMaxDevicesPerHost = 1000;
return host_id * kMaxDevicesPerHost + device_ordinal;
}
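// Builds the step intersection over the step databases of all hosts that
// actually drive accelerators (coordinator-only hosts are excluded).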
StepIntersection ComputeStepIntersectionToMergeOpStats(
const std::vector<OpStatsInfo>& all_op_stats_info,
uint32 max_step_per_host) {
bool no_accelerator_in_system = NoAcceleratorInSystem(all_op_stats_info);
absl::flat_hash_map<uint32, const StepDatabaseResult*> per_host_step_db;
for (const auto& op_stats_info : all_op_stats_info) {
if (IsCoordinator(no_accelerator_in_system, op_stats_info.hardware_type))
continue;
per_host_step_db[op_stats_info.src_host_id] =
&op_stats_info.op_stats->step_db();
}
return StepIntersection(max_step_per_host, per_host_step_db);
}
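// A single-host input is copied verbatim. Otherwise the destination step
// sequence is pre-populated from the step intersection, per-step combiners are
// set up, every host is merged in, kernel reports are trimmed to the top-K by
// duration, and matrix-unit utilization is averaged over the number of hosts.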
void CombineAllOpStats(const std::vector<OpStatsInfo>& all_op_stats_info,
const StepIntersection& step_intersection,
OpStats* combined_op_stats) {
if (all_op_stats_info.size() == 1) {
*combined_op_stats = *all_op_stats_info[0].op_stats;
return;
}
StepDatabaseResult* combined_step_db = combined_op_stats->mutable_step_db();
for (uint32 dst_step_num : step_intersection.DstStepNumbers()) {
combined_step_db->add_step_sequence()->set_step_num(dst_step_num);
}
combined_step_db->set_num_steps_dropped(step_intersection.StepsDropped());
combined_step_db->set_empty_intersect(step_intersection.EmptyIntersect());
OpMetricsDbCombiner host_op_metrics_db_combiner(
combined_op_stats->mutable_host_op_metrics_db());
OpMetricsDbCombiner device_op_metrics_db_combiner(
combined_op_stats->mutable_device_op_metrics_db());
OpMetricsDbCombiner hlo_metrics_db_complete_steps_only_combiner(
combined_op_stats->mutable_hlo_metrics_db_complete_steps_only());
std::vector<OpMetricsDbCombiner> hlo_metrics_db_per_step_combiners;
hlo_metrics_db_per_step_combiners.reserve(
combined_step_db->step_sequence_size());
for (PerCoreStepInfo& step_info :
*combined_step_db->mutable_step_sequence()) {
hlo_metrics_db_per_step_combiners.emplace_back(
step_info.mutable_hlo_metrics_db());
}
bool no_accelerator_in_system = NoAcceleratorInSystem(all_op_stats_info);
for (const auto& op_stats_info : all_op_stats_info) {
CombineOpStats(no_accelerator_in_system, op_stats_info.src_host_id,
op_stats_info.hardware_type, step_intersection,
*op_stats_info.op_stats, combined_op_stats,
&host_op_metrics_db_combiner, &device_op_metrics_db_combiner,
&hlo_metrics_db_complete_steps_only_combiner,
&hlo_metrics_db_per_step_combiners);
}
SortAndKeepTopKDurationKernelReportsInDb(
combined_op_stats->mutable_kernel_stats_db());
combined_op_stats->mutable_performance_counter_result()
->set_matrix_unit_utilization_percent(
combined_op_stats->performance_counter_result()
.matrix_unit_utilization_percent() /
all_op_stats_info.size());
}
}
} | #include "tensorflow/core/profiler/convert/op_stats_combiner.h"
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/hardware_types.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/step_intersection.h"
namespace tensorflow {
namespace profiler {
namespace {
TEST(CombineAllOpStatsTest, CombineRunEnvironment) {
OpStats dst_op_stats, op_stats_1, op_stats_2;
op_stats_1.mutable_run_environment()
->mutable_host_independent_job_info()
->set_profile_duration_ms(100);
op_stats_2.mutable_run_environment()
->mutable_host_independent_job_info()
->set_profile_duration_ms(0);
OpStatsInfo op_stats_info_1(&op_stats_1, TPU, 0),
op_stats_info_2(&op_stats_2, TPU, 0);
std::vector<OpStatsInfo> all_op_stats_info = {op_stats_info_1,
op_stats_info_2};
StepDatabaseResult dummy_step_db_result;
absl::flat_hash_map<uint32 , const StepDatabaseResult*> result;
result.insert({0, &dummy_step_db_result});
StepIntersection dummy_step_intersection = StepIntersection(1, result);
CombineAllOpStats(all_op_stats_info, dummy_step_intersection, &dst_op_stats);
EXPECT_EQ(100, dst_op_stats.run_environment()
.host_independent_job_info()
.profile_duration_ms());
}
TEST(CombineAllOpStatsTest, CombineRunEnvironmentWithUnknownDevice) {
OpStats dst_op_stats, op_stats_1, op_stats_2;
op_stats_1.mutable_run_environment()->set_device_type("TPU");
op_stats_2.mutable_run_environment()->set_device_type("Device");
OpStatsInfo op_stats_info_1(&op_stats_1, TPU, 0),
op_stats_info_2(&op_stats_2, TPU, 0);
std::vector<OpStatsInfo> all_op_stats_info = {op_stats_info_1,
op_stats_info_2};
StepDatabaseResult dummy_step_db_result;
absl::flat_hash_map<uint32 , const StepDatabaseResult*> result;
result.insert({0, &dummy_step_db_result});
StepIntersection dummy_step_intersection = StepIntersection(1, result);
CombineAllOpStats(all_op_stats_info, dummy_step_intersection, &dst_op_stats);
EXPECT_EQ("TPU", dst_op_stats.run_environment().device_type());
}
}
}
} |
1,467 | cpp | tensorflow/tensorflow | xplane_to_kernel_stats_db | tensorflow/core/profiler/convert/xplane_to_kernel_stats_db.cc | tensorflow/core/profiler/convert/xplane_to_kernel_stats_db_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_KERNEL_STATS_DB_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_KERNEL_STATS_DB_H_
#include <functional>
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/gpu_event_stats.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
void ConvertDeviceTraceXPlaneToKernelReports(
const XPlane& device_trace,
const std::function<void(const GpuEventStats&, KernelReport*)>&
on_kernel_fn,
KernelReportMap* reports);
}
}
#endif
#include "tensorflow/core/profiler/convert/xplane_to_kernel_stats_db.h"
#include <functional>
#include <ostream>
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/gpu_event_stats.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/utils/tf_op_utils.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
namespace tensorflow {
namespace profiler {
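// Walks every non-derived line of the device plane, builds a KernelReport for
// each kernel event (launch parameters, TensorCore usage and eligibility),
// lets the optional callback annotate it, and accumulates it into `reports`.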
void ConvertDeviceTraceXPlaneToKernelReports(
const XPlane& device_trace,
const std::function<void(const GpuEventStats&, KernelReport*)>&
on_kernel_fn,
KernelReportMap* reports) {
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&device_trace);
plane.ForEachLine([&](const XLineVisitor& line) {
if (IsDerivedThreadId(line.Id())) {
return;
}
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.DurationNs() == 0) return;
KernelReport kernel;
GpuEventStats stats(&event);
if (!stats.IsKernel()) return;
kernel.set_name(std::string(event.Name()));
kernel.set_is_kernel_using_tensor_core(
IsKernelUsingTensorCore(event.Name()));
kernel.set_total_duration_ns(event.DurationNs());
kernel.set_min_duration_ns(event.DurationNs());
kernel.set_max_duration_ns(event.DurationNs());
ParseKernelLaunchParams(stats.kernel_details, &kernel);
if (stats.IsTfOp()) {
tsl::profiler::TfOp tf_op =
tsl::profiler::ParseTfOpFullname(stats.tf_op_fullname);
kernel.set_op_name(std::string(tf_op.name));
bool tensor_core_eligible =
IsEinsumTensorCoreEligible(stats.equation) ||
IsOpTensorCoreEligible(kernel.op_name());
if (!tensor_core_eligible && kernel.is_kernel_using_tensor_core()) {
VLOG(1) << "Detected new Op using TensorCores: " << kernel.op_name()
<< std::endl;
tensor_core_eligible = true;
}
kernel.set_is_op_tensor_core_eligible(tensor_core_eligible);
}
if (on_kernel_fn) {
on_kernel_fn(stats, &kernel);
}
KernelReportValue value;
value.total_duration_ns = event.DurationNs();
value.min_duration_ns = event.DurationNs();
value.max_duration_ns = event.DurationNs();
value.occurrences = 1;
InsertOrUpdateKernelReport(kernel, value, reports);
});
});
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_kernel_stats_db.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
TEST(ConvertXplaneToKernelStats, MultiKernels) {
XSpace space;
XPlane* device_trace = space.add_planes();
XPlaneBuilder device_trace_builder(device_trace);
device_trace_builder.GetOrCreateLine(0);
XLineBuilder line_builder = device_trace_builder.GetOrCreateLine(0);
CreateXEvent(&device_trace_builder, &line_builder, "kernel_name_shortest",
10000, 1000,
{{StatType::kTfOp, "mul_786"},
{StatType::kKernelDetails, R"MULTI(regs:16
static_shared:0
dynamic_shared:0
grid:1,1,1
block:1,1,1
occ_pct:50.0)MULTI"},
{StatType::kEquation, ""}});
CreateXEvent(&device_trace_builder, &line_builder, "kernel_name_middle",
20000, 2000,
{{StatType::kTfOp, "Conv2D"},
{StatType::kKernelDetails, R"MULTI(regs:32
static_shared:0
dynamic_shared:16384
grid:2,1,1
block:32,1,1
occ_pct:13.0)MULTI"},
{StatType::kEquation, ""}});
CreateXEvent(&device_trace_builder, &line_builder,
"volta_fp16_s884gemm_fp16_128x128_ldg8_f2f_tn",
30000, 3000,
{{StatType::kTfOp, "Einsum_80"},
{StatType::kKernelDetails, R"MULTI(regs:32
static_shared:0
dynamic_shared:16384
grid:3,1,1
block:64,1,1
occ_pct:25.0)MULTI"},
{StatType::kEquation, ""}});
KernelReportMap reports;
ConvertDeviceTraceXPlaneToKernelReports(*device_trace, {}, &reports);
KernelStatsDb kernel_stats;
CopyTopKDurationKernelReportsToDb(reports, &kernel_stats);
EXPECT_EQ(kernel_stats.reports_size(), 3);
{
const auto& kernel = kernel_stats.reports().at(2);
EXPECT_EQ(kernel.name(), "kernel_name_shortest");
EXPECT_EQ(kernel.registers_per_thread(), 16);
EXPECT_EQ(kernel.static_shmem_bytes(), 0);
EXPECT_EQ(kernel.dynamic_shmem_bytes(), 0);
EXPECT_EQ(kernel.grid_dim().at(0), 1);
EXPECT_EQ(kernel.grid_dim().at(1), 1);
EXPECT_EQ(kernel.grid_dim().at(2), 1);
EXPECT_EQ(kernel.block_dim().at(0), 1);
EXPECT_EQ(kernel.block_dim().at(1), 1);
EXPECT_EQ(kernel.block_dim().at(2), 1);
EXPECT_EQ(kernel.total_duration_ns(), 1);
EXPECT_FALSE(kernel.is_kernel_using_tensor_core());
EXPECT_FALSE(kernel.is_op_tensor_core_eligible());
EXPECT_EQ(kernel.op_name(), "mul_786");
}
{
const auto& kernel = kernel_stats.reports().at(1);
EXPECT_EQ(kernel.name(), "kernel_name_middle");
EXPECT_EQ(kernel.registers_per_thread(), 32);
EXPECT_EQ(kernel.static_shmem_bytes(), 0);
EXPECT_EQ(kernel.dynamic_shmem_bytes(), 16384);
EXPECT_EQ(kernel.grid_dim().at(0), 2);
EXPECT_EQ(kernel.grid_dim().at(1), 1);
EXPECT_EQ(kernel.grid_dim().at(2), 1);
EXPECT_EQ(kernel.block_dim().at(0), 32);
EXPECT_EQ(kernel.block_dim().at(1), 1);
EXPECT_EQ(kernel.block_dim().at(2), 1);
EXPECT_EQ(kernel.total_duration_ns(), 2);
EXPECT_FALSE(kernel.is_kernel_using_tensor_core());
EXPECT_TRUE(kernel.is_op_tensor_core_eligible());
EXPECT_EQ(kernel.op_name(), "Conv2D");
}
{
const auto& kernel = kernel_stats.reports().at(0);
EXPECT_EQ(kernel.name(), "volta_fp16_s884gemm_fp16_128x128_ldg8_f2f_tn");
EXPECT_EQ(kernel.registers_per_thread(), 32);
EXPECT_EQ(kernel.static_shmem_bytes(), 0);
EXPECT_EQ(kernel.dynamic_shmem_bytes(), 16384);
EXPECT_EQ(kernel.grid_dim().at(0), 3);
EXPECT_EQ(kernel.grid_dim().at(1), 1);
EXPECT_EQ(kernel.grid_dim().at(2), 1);
EXPECT_EQ(kernel.block_dim().at(0), 64);
EXPECT_EQ(kernel.block_dim().at(1), 1);
EXPECT_EQ(kernel.block_dim().at(2), 1);
EXPECT_EQ(kernel.total_duration_ns(), 3);
EXPECT_TRUE(kernel.is_kernel_using_tensor_core());
EXPECT_TRUE(kernel.is_op_tensor_core_eligible());
EXPECT_EQ(kernel.op_name(), "Einsum_80");
}
}
}
}
} |
1,468 | cpp | tensorflow/tensorflow | dcn_analysis | tensorflow/core/profiler/convert/dcn_analysis.cc | tensorflow/core/profiler/convert/dcn_analysis_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_DCN_ANALYSIS_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_DCN_ANALYSIS_H_
#include <array>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/profiler/convert/dcn_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
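// One endpoint of a DCN message: message_diff/size_diff are +1/+bytes at the
// start of a message and -1/-bytes at its end; duration_ns is non-zero only on
// end events.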
struct TimestampEvent {
uint64_t timestamp_ns;
uint64_t duration_ns;
int32_t message_diff;
size_t size_diff;
int32_t src_slice_id;
};
typedef std::multimap<uint64_t, std::shared_ptr<TimestampEvent>> TimestampMap;
typedef absl::flat_hash_map<std::string, TimestampMap> CollectiveTimestampMap;
struct Straggler {
uint64_t duration_ns;
uint64_t end_timestamp_ns;
size_t size_bytes;
int32_t src_slice_id;
};
static constexpr uint32_t kMaxStragglersPerBurst = 4;
struct DcnBurst {
uint64_t start_timestamp_ns;
uint64_t end_timestamp_ns;
uint64_t burst_size_bytes;
uint64_t num_messages;
uint64_t max_overlapping_messages;
std::array<Straggler, kMaxStragglersPerBurst> stragglers;
};
class DcnBurstManager {
public:
DcnBurstManager() = default;
uint64_t TotalLatency() const { return total_latency_; }
void SetToDisplay(bool to_display) { to_display_ = to_display; }
bool ToDisplay() const { return to_display_; }
const std::vector<DcnBurst> &GetBursts() const { return bursts_; }
void CreateBursts(const TimestampMap &tm_events);
void PrintBursts() {
for (const auto &burst : bursts_) {
LOG(INFO) << burst.start_timestamp_ns << " " << burst.end_timestamp_ns
<< " " << burst.num_messages << " " << burst.burst_size_bytes
<< " " << burst.max_overlapping_messages;
}
}
private:
std::vector<DcnBurst> bursts_;
uint64_t total_latency_ = 0;
bool to_display_ = false;
int32_t active_burst_messages_;
DcnBurst active_burst_;
uint32_t straggler_idx_;
void ResetBurstState();
};
typedef absl::flat_hash_map<std::string, DcnBurstManager>
CollectiveBurstManager;
class DcnEventsProcessor {
public:
DcnEventsProcessor() = delete;
DcnEventsProcessor(uint32_t num_tpu_tensor_cores, bool is_megacore);
uint32_t NumTpuTensorCores() const { return num_tpu_tensor_cores_; }
bool IsMegacore() const { return is_megacore_; }
void SetupMessageInfo(const tensorflow::profiler::XPlaneVisitor &plane);
std::optional<int32_t> MegaScaleMessageId(absl::string_view msg_name) const {
auto iter = megascale_msg_.find(msg_name);
if (iter != megascale_msg_.end()) {
return iter->second;
}
return std::nullopt;
}
uint32_t NumReceivedMessages() const { return received_messages_.size(); }
const tensorflow::profiler::DcnMessage &GetMessage(uint32_t i) const {
return received_messages_[i];
}
bool HasDcnMessages(absl::string_view msg_name) const {
return (megascale_msg_.find(msg_name) != megascale_msg_.end());
}
const TimestampMap &HostTsMap() const { return host_ts_map_; }
const std::vector<DcnBurst> &GetHostBursts() const {
return host_dcn_bursts_.GetBursts();
}
void ProcessReceiveMessages(const tensorflow::profiler::XPlaneVisitor &plane);
void AddHostDcnTrafficToXPlane(tensorflow::profiler::XPlane *host_xplane);
void AddTpuCollectiveDcnTrafficToXPlane(
tensorflow::profiler::XPlane *device_xplane);
private:
const uint32_t num_tpu_tensor_cores_;
const bool is_megacore_;
static constexpr float kLimitLowHostDcnBw = 4.17;
static constexpr float kLimitMedHostDcnBw = 8.34;
static constexpr float kMaxHostDcnBw = 12.5;
std::vector<absl::string_view> registered_dcn_messages_;
absl::flat_hash_map<absl::string_view, int32_t> megascale_msg_;
std::vector<tensorflow::profiler::DcnMessage> received_messages_;
TimestampMap host_ts_map_;
std::vector<CollectiveTimestampMap> tpu_collective_ts_map_;
DcnBurstManager host_dcn_bursts_;
std::vector<CollectiveBurstManager> tpu_collective_bursts_;
uint32_t FindTpuIdx(int tpu);
absl::string_view GetBwInfo(bool is_per_tpu, const DcnBurst &burst,
float &burst_mean_bw,
float &burst_bw_utilization);
uint32_t NumCollectivesQualified(const std::vector<uint64_t> &latencies);
void QualifyCollectives();
void AddQualifiedCollectivesToXPlane(
tensorflow::profiler::XPlaneBuilder &plane_builder, uint32_t tpu_idx);
void AddUnqualifiedCollectivesToXPlane(
tensorflow::profiler::XPlaneBuilder &plane_builder, uint32_t tpu_idx);
void GenerateTimestampEvents(
const tensorflow::profiler::DcnMessage &dcn_message);
void PrintTimestampEvents();
void GenerateBursts();
};
}
}
#endif
#include "tensorflow/core/profiler/convert/dcn_analysis.h"
#include <algorithm>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/profiler/convert/dcn_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tsl/profiler/utils/math_utils.h"
#include "tsl/profiler/utils/tpu_xplane_utils.h"
#include "tsl/profiler/utils/xplane_schema.h"
namespace tensorflow {
namespace profiler {
using tsl::profiler::kMaxCollectivesToDisplay;
using tsl::profiler::kMegaScaleDcnReceive;
using tsl::profiler::LineIdType;
using tsl::profiler::MicroToNano;
void DcnBurstManager::ResetBurstState() {
active_burst_messages_ = 0;
straggler_idx_ = 0;
active_burst_.num_messages = 0;
active_burst_.max_overlapping_messages = 0;
active_burst_.start_timestamp_ns = 0;
active_burst_.end_timestamp_ns = 0;
active_burst_.burst_size_bytes = 0;
}
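// Scans timestamp events in time order: a burst opens when the number of
// in-flight messages rises from zero and closes when it drops back to zero.
// End-of-message events are kept as straggler candidates in a ring buffer of
// the last kMaxStragglersPerBurst entries.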
void DcnBurstManager::CreateBursts(const TimestampMap& tm_events) {
ResetBurstState();
for (const auto& tm_event : tm_events) {
if (active_burst_messages_ < 0) {
LOG_FIRST_N(WARNING, 10)
<< "Negative messages in burst, bursts will be incorrect.";
}
if (active_burst_messages_ == 0) {
active_burst_.start_timestamp_ns = tm_event.first;
}
active_burst_messages_ += tm_event.second->message_diff;
if (tm_event.second->message_diff > 0) {
active_burst_.num_messages += tm_event.second->message_diff;
active_burst_.burst_size_bytes += tm_event.second->size_diff;
} else {
Straggler straggler = {tm_event.second->duration_ns,
tm_event.second->timestamp_ns,
tm_event.second->size_diff * (-1),
tm_event.second->src_slice_id};
active_burst_.stragglers[straggler_idx_] = straggler;
straggler_idx_ = (straggler_idx_ + 1) % kMaxStragglersPerBurst;
}
active_burst_.max_overlapping_messages =
std::max(active_burst_.max_overlapping_messages,
static_cast<uint64_t>(active_burst_messages_));
if (active_burst_messages_ == 0) {
active_burst_.end_timestamp_ns = tm_event.first;
total_latency_ +=
(active_burst_.end_timestamp_ns - active_burst_.start_timestamp_ns);
bursts_.emplace_back(std::move(active_burst_));
ResetBurstState();
}
}
}
DcnEventsProcessor::DcnEventsProcessor(uint32_t num_tpu_tensor_cores,
bool is_megacore)
: num_tpu_tensor_cores_(num_tpu_tensor_cores), is_megacore_(is_megacore) {
registered_dcn_messages_.push_back(kMegaScaleDcnReceive);
tpu_collective_ts_map_.resize(num_tpu_tensor_cores_);
tpu_collective_bursts_.resize(num_tpu_tensor_cores_);
}
void DcnEventsProcessor::SetupMessageInfo(const XPlaneVisitor& plane) {
plane.ForEachEventMetadata([&](const XEventMetadataVisitor& event_metadata) {
if (std::find(registered_dcn_messages_.begin(),
registered_dcn_messages_.end(),
event_metadata.Name()) != registered_dcn_messages_.end()) {
megascale_msg_[event_metadata.Name()] = event_metadata.Id();
}
});
}
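// Maps a destination TPU id onto a tensor-core index; with megacore two tensor
// cores share a chip, so the index is folded onto the even cores.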
uint32_t DcnEventsProcessor::FindTpuIdx(int tpu) {
uint32_t num_tpus = num_tpu_tensor_cores_;
if (is_megacore_) {
num_tpus /= 2;
}
uint32_t tpu_idx = tpu % num_tpus;
if (is_megacore_) {
tpu_idx = tpu_idx * 2;
}
return tpu_idx;
}
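// Emits a start event (+1 message, +size) and an end event (-1 message, -size)
// into both the host-wide timestamp map and the per-TPU per-collective map.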
void DcnEventsProcessor::GenerateTimestampEvents(
const DcnMessage& dcn_message) {
std::shared_ptr<TimestampEvent> start_event(
new TimestampEvent{dcn_message.start_timestamp_ns, 0, 1,
dcn_message.size_bytes, dcn_message.slice_src});
std::shared_ptr<TimestampEvent> end_event(new TimestampEvent{
dcn_message.end_timestamp_ns,
static_cast<uint64_t>(MicroToNano(dcn_message.duration_us)), -1,
-1 * dcn_message.size_bytes, dcn_message.slice_src});
std::pair<uint64_t, std::shared_ptr<TimestampEvent>> start_event_entry =
std::make_pair(dcn_message.start_timestamp_ns, start_event);
std::pair<uint64_t, std::shared_ptr<TimestampEvent>> end_event_entry =
std::make_pair(dcn_message.end_timestamp_ns, end_event);
host_ts_map_.insert(start_event_entry);
host_ts_map_.insert(end_event_entry);
const std::string& collective_name = dcn_message.collective_name;
uint32_t tpu_idx = FindTpuIdx(dcn_message.tpu_dst);
auto& m = tpu_collective_ts_map_[tpu_idx][collective_name];
m.insert(start_event_entry);
m.insert(end_event_entry);
}
void DcnEventsProcessor::PrintTimestampEvents() {
for (const auto& host_ts : host_ts_map_) {
LOG(INFO) << host_ts.first << ": " << host_ts.second->timestamp_ns << " "
<< host_ts.second->duration_ns << " "
<< host_ts.second->message_diff << " "
<< host_ts.second->size_diff << " "
<< host_ts.second->src_slice_id;
}
for (uint32_t tpu_idx = 0; tpu_idx < num_tpu_tensor_cores_; tpu_idx++) {
LOG(INFO) << "TPU: " << tpu_idx;
for (const auto& col_id : tpu_collective_ts_map_[tpu_idx]) {
LOG(INFO) << col_id.first;
for (const auto& tpu_col_ts :
tpu_collective_ts_map_[tpu_idx][col_id.first]) {
LOG(INFO) << tpu_col_ts.first << ": " << tpu_col_ts.second->timestamp_ns
<< " " << tpu_col_ts.second->duration_ns << " "
<< tpu_col_ts.second->message_diff << " "
<< tpu_col_ts.second->size_diff << " "
<< tpu_col_ts.second->src_slice_id;
}
}
}
}
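// Given latencies sorted in descending order, a collective qualifies for its
// own trace line while its latency is at least 5% of the total host DCN
// latency; between 5% and 20% it only qualifies until half of the display
// slots are used, and the count is capped at kMaxCollectivesToDisplay - 1.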
uint32_t DcnEventsProcessor::NumCollectivesQualified(
const std::vector<uint64_t>& latencies) {
uint32_t num_collectives_qualified = 0;
uint32_t max_collectives = kMaxCollectivesToDisplay - 1;
for (const auto& lat : latencies) {
if (lat < host_dcn_bursts_.TotalLatency() * 0.05) {
return num_collectives_qualified;
} else if (lat < host_dcn_bursts_.TotalLatency() * 0.2 &&
num_collectives_qualified >= (max_collectives / 2)) {
return num_collectives_qualified;
} else if (num_collectives_qualified >= max_collectives) {
return num_collectives_qualified;
} else {
num_collectives_qualified++;
}
}
return latencies.size();
}
void DcnEventsProcessor::QualifyCollectives() {
for (auto tpu_idx = 0; tpu_idx < num_tpu_tensor_cores_; tpu_idx++) {
std::vector<uint64_t> latency_to_order;
latency_to_order.reserve(tpu_collective_bursts_[tpu_idx].size());
for (const auto& col_info : tpu_collective_bursts_[tpu_idx]) {
latency_to_order.emplace_back(col_info.second.TotalLatency());
}
std::sort(latency_to_order.begin(), latency_to_order.end(),
std::greater<uint64_t>());
uint32_t num_collectives_qualified =
NumCollectivesQualified(latency_to_order);
if (num_collectives_qualified > 0) {
uint32_t min_latency_to_qualify =
latency_to_order[num_collectives_qualified - 1];
uint32_t col_num = 0;
for (auto& col_info : tpu_collective_bursts_[tpu_idx]) {
if (col_info.second.TotalLatency() >= min_latency_to_qualify) {
col_info.second.SetToDisplay(true);
if (++col_num == kMaxCollectivesToDisplay - 1) break;
}
}
}
}
}
void DcnEventsProcessor::GenerateBursts() {
host_dcn_bursts_.CreateBursts(host_ts_map_);
host_dcn_bursts_.SetToDisplay(true);
for (auto tpu_idx = 0; tpu_idx < num_tpu_tensor_cores_; tpu_idx++) {
for (const auto& col_info : tpu_collective_ts_map_[tpu_idx]) {
tpu_collective_bursts_[tpu_idx][col_info.first].CreateBursts(
tpu_collective_ts_map_[tpu_idx][col_info.first]);
}
}
QualifyCollectives();
}
void DcnEventsProcessor::ProcessReceiveMessages(const XPlaneVisitor& plane) {
plane.ForEachLine([&](const XLineVisitor& line) {
uint32_t recv_msg_id = megascale_msg_[kMegaScaleDcnReceive];
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Id() == recv_msg_id) {
DcnMessage dcn_message = GetDcnMessageFromXEvent(event);
if (dcn_message.validity_info == DCN_MESSAGE_VALID) {
GenerateTimestampEvents(dcn_message);
}
received_messages_.emplace_back(std::move(dcn_message));
}
});
});
GenerateBursts();
}
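// Mean bandwidth is bytes/ns over the burst window (numerically GB/s). For
// per-TPU bursts the thresholds are divided across tensor cores (chips when
// megacore), and utilization is reported relative to kMaxHostDcnBw.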
absl::string_view DcnEventsProcessor::GetBwInfo(bool is_per_tpu,
const DcnBurst& burst,
float& burst_mean_bw,
float& burst_bw_utilization) {
absl::string_view bw_level;
uint32_t bw_divider = 1;
burst_mean_bw = static_cast<float>(burst.burst_size_bytes) /
(burst.end_timestamp_ns - burst.start_timestamp_ns);
if (is_per_tpu) {
bw_divider = num_tpu_tensor_cores_;
if (is_megacore_) {
bw_divider /= 2;
}
}
if (burst_mean_bw < kLimitLowHostDcnBw / bw_divider) {
bw_level = "Low BW";
} else if (burst_mean_bw < kLimitMedHostDcnBw / bw_divider) {
bw_level = "Med BW";
} else {
bw_level = "High BW";
}
burst_bw_utilization = burst_mean_bw / (kMaxHostDcnBw / bw_divider);
return bw_level;
}
void DcnEventsProcessor::AddHostDcnTrafficToXPlane(XPlane* host_xplane) {
if (!host_dcn_bursts_.ToDisplay()) return;
XPlaneBuilder plane_builder(host_xplane);
XLineBuilder line =
plane_builder.GetOrCreateLine(LineIdType::kDcnHostTraffic);
line.SetNameIfEmpty("DCN Host Bandwidth");
line.SetTimestampNs(0);
XStatMetadata* bw_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Bandwidth (GBytes/sec)");
XStatMetadata* bw_util_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Bandwidth Utilization");
XStatMetadata* num_msg_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Total Messages");
XStatMetadata* max_overlap_msg_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Max Overlapping Messages");
XStatMetadata* avg_msg_size_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Average Message Size (Bytes)");
for (const auto& host_burst : host_dcn_bursts_.GetBursts()) {
float burst_mean_bw, bw_utilization;
absl::string_view bw_level =
GetBwInfo(false, host_burst, burst_mean_bw, bw_utilization);
XEventMetadata* event_metadata =
plane_builder.GetOrCreateEventMetadata(bw_level);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetOffsetNs(host_burst.start_timestamp_ns);
event.SetDurationNs(host_burst.end_timestamp_ns -
host_burst.start_timestamp_ns);
event.ParseAndAddStatValue(*bw_stat_metadata,
std::to_string(burst_mean_bw));
event.ParseAndAddStatValue(*bw_util_stat_metadata,
std::to_string(bw_utilization));
event.AddStatValue(*num_msg_stat_metadata, host_burst.num_messages);
event.AddStatValue(*max_overlap_msg_stat_metadata,
host_burst.max_overlapping_messages);
uint32_t avg_message_size =
host_burst.burst_size_bytes / host_burst.num_messages;
event.AddStatValue(*avg_msg_size_stat_metadata, avg_message_size);
}
}
void DcnEventsProcessor::AddUnqualifiedCollectivesToXPlane(
XPlaneBuilder& plane_builder, uint32_t tpu_idx) {
XLineBuilder line =
plane_builder.GetOrCreateLine(LineIdType::kDcnCollectiveTrafficMax);
line.SetNameIfEmpty("Remaining collectives");
line.SetTimestampNs(0);
for (const auto& col_item : tpu_collective_bursts_[tpu_idx]) {
if (col_item.second.ToDisplay()) continue;
for (const auto& col_burst : col_item.second.GetBursts()) {
XEventMetadata* straggler_event_metadata =
plane_builder.GetOrCreateEventMetadata(col_item.first);
uint32_t stragglers_processed = 0;
XStatMetadata* straggler_src_slice_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Source slice");
XStatMetadata* straggler_duration_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Duration ns");
XStatMetadata* straggler_send_time_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Send timestamp ns");
XStatMetadata* straggler_recv_time_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Recv timestamp ns");
for (const auto& straggler : col_burst.stragglers) {
XEventBuilder straggler_event =
line.AddEvent(*straggler_event_metadata);
straggler_event.SetOffsetNs(straggler.end_timestamp_ns - 10000);
straggler_event.SetDurationNs(10000);
straggler_event.AddStatValue(*straggler_src_slice_stat_metadata,
straggler.src_slice_id);
straggler_event.AddStatValue(*straggler_duration_ns_stat_metadata,
straggler.duration_ns);
straggler_event.AddStatValue(
*straggler_send_time_ns_stat_metadata,
straggler.end_timestamp_ns - straggler.duration_ns);
straggler_event.AddStatValue(*straggler_recv_time_ns_stat_metadata,
straggler.end_timestamp_ns);
if (++stragglers_processed >= col_burst.num_messages) break;
}
}
}
}
void DcnEventsProcessor::AddQualifiedCollectivesToXPlane(
XPlaneBuilder& plane_builder, uint32_t tpu_idx) {
uint32_t total_collectives = 0;
for (const auto& col_item : tpu_collective_bursts_[tpu_idx]) {
if (!col_item.second.ToDisplay()) continue;
const std::string& col_name = col_item.first;
XLineBuilder line = plane_builder.GetOrCreateLine(
LineIdType::kDcnCollectiveTraffic + total_collectives++);
line.SetNameIfEmpty(col_name);
line.SetTimestampNs(0);
XStatMetadata* bw_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Bandwidth (GBytes/sec)");
XStatMetadata* bw_util_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Bandwidth Utilization");
XStatMetadata* num_msg_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Total Messages");
XStatMetadata* max_overlap_msg_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Max Overlapping Messages");
XStatMetadata* avg_msg_size_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Average Message Size (Bytes)");
XStatMetadata* straggler_details_metadata =
plane_builder.GetOrCreateStatMetadata("Straggler info:");
XStatMetadata* straggler_src_slice_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Source slice");
XStatMetadata* straggler_duration_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Duration ns");
XStatMetadata* straggler_send_time_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Send timestamp ns");
XStatMetadata* straggler_recv_time_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Recv timestamp ns");
for (const auto& col_burst : col_item.second.GetBursts()) {
float burst_mean_bw, bw_utilization;
absl::string_view bw_level =
GetBwInfo(true, col_burst, burst_mean_bw, bw_utilization);
XEventMetadata* event_metadata =
plane_builder.GetOrCreateEventMetadata(bw_level);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetOffsetNs(col_burst.start_timestamp_ns);
event.SetDurationNs(col_burst.end_timestamp_ns -
col_burst.start_timestamp_ns);
event.ParseAndAddStatValue(*bw_stat_metadata,
std::to_string(burst_mean_bw));
event.ParseAndAddStatValue(*bw_util_stat_metadata,
std::to_string(bw_utilization));
event.AddStatValue(*num_msg_stat_metadata, col_burst.num_messages);
event.AddStatValue(*max_overlap_msg_stat_metadata,
col_burst.max_overlapping_messages);
event.AddStatValue(*avg_msg_size_stat_metadata,
col_burst.burst_size_bytes / col_burst.num_messages);
XEventMetadata* straggler_event_metadata =
plane_builder.GetOrCreateEventMetadata("Straggler");
uint32_t stragglers_processed = 0;
std::string straggler_details = "Stragglers:\n";
for (const auto& straggler : col_burst.stragglers) {
if (straggler.end_timestamp_ns == col_burst.end_timestamp_ns) {
XEventBuilder straggler_event =
line.AddEvent(*straggler_event_metadata);
straggler_event.SetOffsetNs(straggler.end_timestamp_ns -
straggler.duration_ns);
straggler_event.SetDurationNs(straggler.duration_ns);
straggler_event.AddStatValue(*straggler_src_slice_stat_metadata,
straggler.src_slice_id);
straggler_event.AddStatValue(*straggler_duration_ns_stat_metadata,
straggler.duration_ns);
straggler_event.AddStatValue(
*straggler_send_time_ns_stat_metadata,
straggler.end_timestamp_ns - straggler.duration_ns);
straggler_event.AddStatValue(*straggler_recv_time_ns_stat_metadata,
straggler.end_timestamp_ns);
}
straggler_details +=
" Src slice: " + std::to_string(straggler.src_slice_id) +
" -- Duration (ns): " + std::to_string(straggler.duration_ns) +
" -- [Send Timestamp, Recv Timestamp]: [" +
std::to_string(straggler.end_timestamp_ns - straggler.duration_ns) +
", " + std::to_string(straggler.end_timestamp_ns) + "]\n";
if (++stragglers_processed >= col_burst.num_messages) break;
}
event.AddStatValue(*straggler_details_metadata, straggler_details);
}
}
}
void DcnEventsProcessor::AddTpuCollectiveDcnTrafficToXPlane(
XPlane* device_xplane) {
XPlaneBuilder plane_builder(device_xplane);
auto tpu = tsl::profiler::GetTensorCoreId(plane_builder.Name());
if (!tpu.has_value()) return;
uint32_t tpu_idx = FindTpuIdx(tpu.value());
AddQualifiedCollectivesToXPlane(plane_builder, tpu_idx);
AddUnqualifiedCollectivesToXPlane(plane_builder, tpu_idx);
}
}
} | #include "tensorflow/core/profiler/convert/dcn_analysis.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/profiler/convert/dcn_utils.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
#include "tsl/profiler/utils/xplane_schema.h"
namespace tensorflow {
namespace profiler {
namespace {
using tensorflow::profiler::DCN_MESSAGE_INVALID_BAD_KEY;
using tensorflow::profiler::DCN_MESSAGE_INVALID_CLOCK_SKEW;
using tensorflow::profiler::DCN_MESSAGE_VALID;
using tensorflow::profiler::DCN_MESSAGE_VALID_LOOPBACK;
using tensorflow::profiler::XEventBuilder;
using tensorflow::profiler::XEventMetadata;
using tensorflow::profiler::XLineBuilder;
using tensorflow::profiler::XPlane;
using tensorflow::profiler::XPlaneBuilder;
using tensorflow::profiler::XPlaneVisitor;
using tensorflow::profiler::XSpace;
using ::testing::FieldsAre;
using tsl::profiler::kMegaScaleDcnReceive;
using tsl::profiler::kMegaScaleDcnSend;
TEST(DcnAnalysis, SetupMessageInfoTest) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder host_trace_builder(host_trace);
XEventMetadata *event_metadata_1 =
host_trace_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XEventMetadata *event_metadata_2 =
host_trace_builder.GetOrCreateEventMetadata(2);
event_metadata_2->set_name(std::string(kMegaScaleDcnSend));
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor( 4,
false);
dcn_events_processor.SetupMessageInfo(plane);
ASSERT_FALSE(dcn_events_processor.HasDcnMessages(kMegaScaleDcnSend));
ASSERT_TRUE(dcn_events_processor.HasDcnMessages(kMegaScaleDcnReceive));
ASSERT_FALSE(dcn_events_processor.HasDcnMessages("Another Message"));
ASSERT_EQ(dcn_events_processor.MegaScaleMessageId(kMegaScaleDcnReceive), 1);
ASSERT_EQ(dcn_events_processor.MegaScaleMessageId(kMegaScaleDcnSend),
std::nullopt);
}
TEST(DcnAnalysis, CreateMessageTestValidMessages) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder xplane_builder(host_trace);
XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XLineBuilder xline_builder_0 = xplane_builder.GetOrCreateLine(0);
XLineBuilder xline_builder_1 = xplane_builder.GetOrCreateLine(1);
XEventBuilder event_builder = xline_builder_0.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(100000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"),
"all-reduce.273_312");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 1);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 24);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 50);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 32768);
event_builder = xline_builder_0.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(175000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"),
"super-collective.1234");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 112);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
1);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 34);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
2);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 4);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 50);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 1);
event_builder = xline_builder_1.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(150000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"), "super-collective");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 9);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 0);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 75);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 10);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor(4, false);
dcn_events_processor.SetupMessageInfo(plane);
dcn_events_processor.ProcessReceiveMessages(plane);
ASSERT_EQ(dcn_events_processor.NumReceivedMessages(), 3);
EXPECT_THAT(dcn_events_processor.GetMessage(0),
FieldsAre("all-reduce.273_312",
2, 3, 1, 3,
50000, 100000, 50,
32768, 0, 24,
DCN_MESSAGE_VALID));
EXPECT_THAT(dcn_events_processor.GetMessage(1),
FieldsAre("super-collective.1234",
112, 1, 34, 2,
125000, 175000, 50,
1, 4, 0,
DCN_MESSAGE_VALID));
EXPECT_THAT(
dcn_events_processor.GetMessage(2),
FieldsAre("super-collective",
9, 3, 0, 0,
75000, 150000,
75,
10, -1, -1,
DCN_MESSAGE_VALID));
TimestampMap host_ts_map = dcn_events_processor.HostTsMap();
ASSERT_EQ(host_ts_map.size(), 6);
for (const auto &ts_map_item : host_ts_map) {
ASSERT_EQ(ts_map_item.first, ts_map_item.second->timestamp_ns);
if (ts_map_item.first == 50000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 0);
ASSERT_EQ(ts_map_item.second->message_diff, 1);
ASSERT_EQ(ts_map_item.second->size_diff, 32768);
} else if (ts_map_item.first == 125000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 0);
ASSERT_EQ(ts_map_item.second->message_diff, 1);
ASSERT_EQ(ts_map_item.second->size_diff, 1);
} else if (ts_map_item.first == 75000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 0);
ASSERT_EQ(ts_map_item.second->message_diff, 1);
ASSERT_EQ(ts_map_item.second->size_diff, 10);
} else if (ts_map_item.first == 100000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 50000);
ASSERT_EQ(ts_map_item.second->message_diff, -1);
ASSERT_EQ(ts_map_item.second->size_diff, -32768);
} else if (ts_map_item.first == 175000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 50000);
ASSERT_EQ(ts_map_item.second->message_diff, -1);
ASSERT_EQ(ts_map_item.second->size_diff, -1);
} else if (ts_map_item.first == 150000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 75000);
ASSERT_EQ(ts_map_item.second->message_diff, -1);
ASSERT_EQ(ts_map_item.second->size_diff, -10);
} else {
FAIL() << "Unexpected timestamp entry.";
}
}
const std::vector<DcnBurst> &host_bursts =
dcn_events_processor.GetHostBursts();
ASSERT_EQ(host_bursts.size(), 1);
ASSERT_EQ(host_bursts[0].num_messages, 3);
ASSERT_EQ(host_bursts[0].start_timestamp_ns, 50000);
ASSERT_EQ(host_bursts[0].end_timestamp_ns, 175000);
ASSERT_EQ(host_bursts[0].burst_size_bytes, 32779);
ASSERT_EQ(host_bursts[0].max_overlapping_messages, 2);
}
TEST(DcnAnalysis, CreateLoopBackMessageTest) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder xplane_builder(host_trace);
XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(5000000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"), "all-gather.1234");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 2);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
1);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 4);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 40);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 1000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 1000);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor(4, false);
dcn_events_processor.SetupMessageInfo(plane);
dcn_events_processor.ProcessReceiveMessages(plane);
ASSERT_EQ(dcn_events_processor.NumReceivedMessages(), 1);
EXPECT_THAT(dcn_events_processor.GetMessage(0),
FieldsAre("all-gather.1234",
2, 3, 2, 1,
4000000, 5000000, 1000,
1000, 4, 40,
DCN_MESSAGE_VALID_LOOPBACK));
}
TEST(DcnAnalysis, CreateZeroDurationMessageTest) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder xplane_builder(host_trace);
XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(20000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"),
"all-reduce.273_312");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 1);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
1);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 25);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 512);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor(4, false);
dcn_events_processor.SetupMessageInfo(plane);
dcn_events_processor.ProcessReceiveMessages(plane);
EXPECT_THAT(
dcn_events_processor.GetMessage(0),
FieldsAre("all-reduce.273_312",
2, 3, 1, 1,
20000, 20000,
0,
512, 0, 25,
DCN_MESSAGE_INVALID_CLOCK_SKEW));
}
TEST(DcnAnalysis, CreateMissingKeyTest) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder xplane_builder(host_trace);
XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(50000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 10);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 100);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor(4, false);
dcn_events_processor.SetupMessageInfo(plane);
dcn_events_processor.ProcessReceiveMessages(plane);
EXPECT_THAT(
dcn_events_processor.GetMessage(0),
FieldsAre("",
-1, -1, -1, -1,
40000, 50000,
10,
100, -1, -1,
DCN_MESSAGE_INVALID_BAD_KEY));
}
}
}
} |
1,469 | cpp | tensorflow/tensorflow | op_stats_to_pod_stats | tensorflow/core/profiler/convert/op_stats_to_pod_stats.cc | tensorflow/core/profiler/convert/op_stats_to_pod_stats_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_TO_POD_STATS_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_TO_POD_STATS_H_
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/pod_stats.pb.h"
namespace tensorflow {
namespace profiler {
PodStatsDatabase ConvertOpStatsToPodStats(const OpStats& op_stats);
}
}
#endif
#include "tensorflow/core/profiler/convert/op_stats_to_pod_stats.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "google/protobuf/any.pb.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
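// Unpacks the generic step breakdown, aggregates it into the coarse event-type
// groups (converted from picoseconds to microseconds), and records the largest
// group as the step's bottleneck.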
PodStatsRecord CreatePodStatsRecord(absl::string_view host_name,
const StepInfoResult& step_info) {
PodStatsRecord record;
GenericStepBreakdown generic;
bool success = step_info.step_breakdown().UnpackTo(&generic);
DCHECK(success);
record.set_host_name(string(host_name));
record.set_step_num(step_info.step_num());
record.set_total_duration_us(
tsl::profiler::PicoToMicro(step_info.duration_ps()));
auto& step_breakdown_map = *record.mutable_step_breakdown_us();
std::vector<std::pair<uint64, absl::string_view>> metrics;
auto add_event = [&](GenericEventType type,
std::initializer_list<EventType> event_list) {
uint64 ps = 0;
for (const auto& event_type : event_list) {
ps += gtl::FindWithDefault(generic.type_ps(), event_type, 0);
}
step_breakdown_map[type] = tsl::profiler::PicoToMicro(ps);
metrics.emplace_back(ps, GetGenericEventTypeStr(type));
};
add_event(kDeviceCompute, {DEVICE_COMPUTE_32, DEVICE_COMPUTE_16});
add_event(kDeviceToDevice, {DEVICE_TO_DEVICE, DEVICE_WAIT_DEVICE});
add_event(kDeviceCollectives, {DEVICE_COLLECTIVES});
add_event(kHostCompute, {HOST_COMPUTE});
add_event(kHostPrepare, {HOST_PREPARE});
add_event(kInput, {HOST_WAIT_INPUT, HOST_TO_DEVICE, DEVICE_WAIT_HOST});
add_event(kOutput, {DEVICE_TO_HOST});
add_event(kCompile, {HOST_COMPILE});
add_event(kAllOthers, {UNKNOWN_TIME});
std::sort(metrics.begin(), metrics.end());
record.set_bottleneck(metrics.back().second.data(),
metrics.back().second.size());
return record;
}
}
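// Emits one PodStatsRecord per (step, core), resolving the host name through
// core_id_to_details; cores missing from that map are skipped with a warning.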
PodStatsDatabase ConvertOpStatsToPodStats(const OpStats& op_stats) {
PodStatsDatabase pod_stats_db;
const auto& core_id_map = op_stats.core_id_to_details();
for (int i = GenericEventType::kFirstGenericEventType;
i <= GenericEventType::kLastGenericEventType; i++) {
auto& event = *pod_stats_db.add_step_breakdown_events();
event.set_id(i);
absl::string_view type_str =
GetGenericEventTypeStr(static_cast<GenericEventType>(i));
event.set_name(type_str.data(), type_str.size());
}
for (const auto& step_sequence : op_stats.step_db().step_sequence()) {
for (const auto& entry : step_sequence.step_info_per_core()) {
if (!core_id_map.contains(entry.first)) {
LOG(WARNING) << "core_id_map does not contain " << entry.first;
continue;
}
const CoreDetails& details = core_id_map.at(entry.first);
*pod_stats_db.add_pod_stats_record() =
CreatePodStatsRecord(details.hostname(), entry.second);
}
}
PopulateStepDiagnostics(op_stats, pod_stats_db.mutable_diagnostics());
return pod_stats_db;
}
}
} | #include "tensorflow/core/profiler/convert/op_stats_to_pod_stats.h"
#include "google/protobuf/any.pb.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
const double kMaxError = 1e-6;
constexpr int kStepNum = 2;
constexpr int kCoreId = 1001;
constexpr int kStepTimePs = 1000;
constexpr int kHostComputePs = 50;
constexpr int kHostCompilePs = 50;
constexpr int kHostToHostPs = 50;
constexpr int kHostToDevicePs = 50;
constexpr int kHostPreparePs = 50;
constexpr int kDeviceCollectivePs = 350;
constexpr int kHostWaitInputPs = 50;
constexpr int kDeviceToDevicePs = 50;
constexpr int kDeviceToHostPs = 50;
constexpr int kDeviceCompute32Ps = 50;
constexpr int kDeviceCompute16Ps = 50;
constexpr int kDeviceWaitDevicePs = 50;
constexpr int kDeviceWaitHostPs = 50;
constexpr int kUnknownTimePs = 50;
static constexpr char kHostname[] = "host:123";
void CreateOpStats(OpStats* op_stats) {
PerCoreStepInfo* info = op_stats->mutable_step_db()->add_step_sequence();
info->set_step_num(kStepNum);
StepInfoResult& step_info = (*info->mutable_step_info_per_core())[kCoreId];
step_info.set_step_num(kStepNum);
step_info.set_duration_ps(kStepTimePs);
GenericStepBreakdown breakdown;
auto& type_ps = *breakdown.mutable_type_ps();
type_ps[HOST_COMPUTE] = kHostComputePs;
type_ps[HOST_COMPILE] = kHostCompilePs;
type_ps[HOST_TO_HOST] = kHostToHostPs;
type_ps[HOST_TO_DEVICE] = kHostToDevicePs;
type_ps[HOST_PREPARE] = kHostPreparePs;
type_ps[DEVICE_COLLECTIVES] = kDeviceCollectivePs;
type_ps[HOST_WAIT_INPUT] = kHostWaitInputPs;
type_ps[DEVICE_TO_DEVICE] = kDeviceToDevicePs;
type_ps[DEVICE_TO_HOST] = kDeviceToHostPs;
type_ps[DEVICE_COMPUTE_32] = kDeviceCompute32Ps;
type_ps[DEVICE_COMPUTE_16] = kDeviceCompute16Ps;
type_ps[DEVICE_WAIT_DEVICE] = kDeviceWaitDevicePs;
type_ps[DEVICE_WAIT_HOST] = kDeviceWaitHostPs;
type_ps[UNKNOWN_TIME] = kUnknownTimePs;
step_info.mutable_step_breakdown()->PackFrom(breakdown);
CoreDetails& details = (*op_stats->mutable_core_id_to_details())[kCoreId];
details.set_hostname(kHostname);
}
TEST(OpStatsToPodStats, GpuPodStats) {
OpStats op_stats;
CreateOpStats(&op_stats);
PodStatsDatabase pod_stats_db = ConvertOpStatsToPodStats(op_stats);
EXPECT_EQ(1, pod_stats_db.pod_stats_record_size());
const PodStatsRecord& record = pod_stats_db.pod_stats_record(0);
EXPECT_EQ(kStepNum, record.step_num());
EXPECT_EQ(kHostname, record.host_name());
EXPECT_NEAR(tsl::profiler::PicoToMicro(kStepTimePs),
record.total_duration_us(), kMaxError);
const auto& breakdown = record.step_breakdown_us();
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceCompute32Ps + kDeviceCompute16Ps),
breakdown.at(kDeviceCompute), kMaxError);
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceToDevicePs + kDeviceWaitDevicePs),
breakdown.at(kDeviceToDevice), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceCollectivePs),
breakdown.at(kDeviceCollectives), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostComputePs),
breakdown.at(kHostCompute), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostPreparePs),
breakdown.at(kHostPrepare), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostWaitInputPs + kHostToDevicePs +
kDeviceWaitHostPs),
breakdown.at(kInput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceToHostPs),
breakdown.at(kOutput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostCompilePs),
breakdown.at(kCompile), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kUnknownTimePs),
breakdown.at(kAllOthers), kMaxError);
EXPECT_EQ(GetGenericEventTypeStr(kDeviceCollectives), record.bottleneck());
}
TEST(OpStatsToPodStats, Diagnostics) {
OpStats op_stats;
op_stats.mutable_step_db()->set_use_incomplete_step(true);
PodStatsDatabase pod_stats_db = ConvertOpStatsToPodStats(op_stats);
EXPECT_EQ(1, pod_stats_db.diagnostics().warnings_size());
EXPECT_EQ(kErrorIncompleteStep, pod_stats_db.diagnostics().warnings(0));
}
}
}
} |
1,470 | cpp | tensorflow/tensorflow | xplane_to_dcn_collective_stats | tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.cc | tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats_test.cc |
#ifndef TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_DCN_COLLECTIVE_STATS_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_DCN_COLLECTIVE_STATS_H_
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/protobuf/dcn_slack_analysis.pb.h"
namespace tensorflow {
namespace profiler {
absl::StatusOr<bool> ConvertMultiXSpaceToDcnCollectiveStats(
const SessionSnapshot& session_snapshot);
absl::StatusOr<bool> HasDcnCollectiveStatsInMultiXSpace(
const SessionSnapshot& session_snapshot);
absl::StatusOr<DcnSlackAnalysis> GetDcnSlackAnalysisByHostName(
const SessionSnapshot& session_snapshot, std::string hostname);
}
}
#endif
#include "tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/match.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/dcn_slack_analysis_combiner.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/convert/xspace_to_dcn_slack_analysis.h"
#include "tensorflow/core/profiler/protobuf/dcn_slack_analysis.pb.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
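// Returns true if the host-threads plane of `xspace` contains any event
// metadata whose name starts with "MegaScale:", the marker used here to
// detect DCN collective traffic in a trace.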
bool HasDcnCollectiveStatsInXSpace(const XSpace& xspace) {
if (const tensorflow::profiler::XPlane* xplane = FindPlaneWithName(
xspace, tensorflow::profiler::kHostThreadsPlaneName);
xplane != nullptr) {
for (const auto& [_, metadata] : xplane->event_metadata()) {
if (absl::StartsWith(metadata.name(), "MegaScale:")) {
return true;
}
}
}
return false;
}
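// Converts every XSpace in the snapshot to a per-host DcnSlackAnalysis and
// caches the protos. If any host has no DCN collective stats, an empty proto
// is cached under kNoHostIdentifier and false is returned; otherwise the
// per-host results are combined and also cached under kAllHostsIdentifier.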
absl::StatusOr<bool> GetDcnCollectiveStatsFromMultiXSpaceAndSaveToFile(
const SessionSnapshot& session_snapshot) {
DcnSlackAnalysisCombiner combiner;
for (int idx = 0; idx < session_snapshot.XSpaceSize(); idx++) {
std::string hostname = session_snapshot.GetHostname(idx);
TF_ASSIGN_OR_RETURN(std::unique_ptr<XSpace> xspace,
session_snapshot.GetXSpace(idx));
if (!HasDcnCollectiveStatsInXSpace(*xspace)) {
DcnSlackAnalysis dcnSlackAnalysis;
TF_RETURN_IF_ERROR(WriteBinaryProto(session_snapshot,
StoredDataType::DCN_COLLECTIVE_STATS,
kNoHostIdentifier, dcnSlackAnalysis));
return false;
}
DcnSlackAnalysis dcnSlackAnalysis =
ConvertXSpaceToDcnSlackAnalysis(*xspace, nullptr, nullptr);
TF_RETURN_IF_ERROR(WriteBinaryProto(session_snapshot,
StoredDataType::DCN_COLLECTIVE_STATS,
hostname, dcnSlackAnalysis));
combiner.Combine(dcnSlackAnalysis);
}
DcnSlackAnalysis dcnSlackAnalysis = combiner.Finalize();
TF_RETURN_IF_ERROR(WriteBinaryProto(session_snapshot,
StoredDataType::DCN_COLLECTIVE_STATS,
kAllHostsIdentifier, dcnSlackAnalysis));
return true;
}
}
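// Cheap existence check: if a DCN_COLLECTIVE_STATS cache file already exists,
// the answer is read from it (an empty host field means "no stats");
// otherwise the raw XSpaces are scanned for the MegaScale marker.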
absl::StatusOr<bool> HasDcnCollectiveStatsInMultiXSpace(
const SessionSnapshot& session_snapshot) {
std::pair<bool, std::string> hasCacheFile;
TF_ASSIGN_OR_RETURN(hasCacheFile, session_snapshot.HasCacheFile(
StoredDataType::DCN_COLLECTIVE_STATS));
if (!hasCacheFile.first) {
for (int idx = 0; idx < session_snapshot.XSpaceSize(); idx++) {
std::string hostname = session_snapshot.GetHostname(idx);
TF_ASSIGN_OR_RETURN(std::unique_ptr<XSpace> xspace,
session_snapshot.GetXSpace(idx));
if (HasDcnCollectiveStatsInXSpace(*xspace)) {
return true;
}
}
return false;
}
if (hasCacheFile.second.empty()) {
return false;
} else {
return true;
}
}
absl::StatusOr<bool> ConvertMultiXSpaceToDcnCollectiveStats(
const SessionSnapshot& session_snapshot) {
std::pair<bool, std::string> hasCacheFile;
TF_ASSIGN_OR_RETURN(hasCacheFile, session_snapshot.HasCacheFile(
StoredDataType::DCN_COLLECTIVE_STATS));
if (!hasCacheFile.first) {
return GetDcnCollectiveStatsFromMultiXSpaceAndSaveToFile(session_snapshot);
}
if (hasCacheFile.second.empty()) {
return false;
} else {
return true;
}
}
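// Makes sure the DCN collective stats have been generated, then loads the
// cached DcnSlackAnalysis for `hostname`; returns an empty proto when the
// session has no DCN collective stats at all.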
absl::StatusOr<DcnSlackAnalysis> GetDcnSlackAnalysisByHostName(
const SessionSnapshot& session_snapshot, const std::string hostname) {
TF_ASSIGN_OR_RETURN(bool hasDcnCollectiveStats,
ConvertMultiXSpaceToDcnCollectiveStats(session_snapshot));
DcnSlackAnalysis dcnSlackAnalysis;
if (hasDcnCollectiveStats) {
TF_RETURN_IF_ERROR(ReadBinaryProto(session_snapshot,
StoredDataType::DCN_COLLECTIVE_STATS,
hostname, &dcnSlackAnalysis));
}
return dcnSlackAnalysis;
}
}
} |
#include "tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/protobuf/dcn_slack_analysis.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
DcnSlackAnalysis CreateDcnSlackAnalysisProto() {
DcnSlackAnalysis dcn_slack_analysis;
DcnSlackSummary* dcn_slack_summary =
dcn_slack_analysis.add_dcn_slack_summary();
dcn_slack_summary->set_rendezvous("collective");
dcn_slack_summary->set_recv_op_name("recv-done");
dcn_slack_summary->set_send_op_name("send");
dcn_slack_summary->set_slack_us(2);
dcn_slack_summary->set_observed_duration_us(12);
dcn_slack_summary->set_stall_duration_us(5);
dcn_slack_summary->set_occurrences(4);
dcn_slack_summary->set_bytes_transmitted_over_network(819200);
return dcn_slack_analysis;
}
SessionSnapshot CreateSessionSnapshot(bool create_cache_file,
bool has_dcn_collective_stats) {
std::string test_name =
::testing::UnitTest::GetInstance()->current_test_info()->name();
std::string path = absl::StrCat("ram:
std::unique_ptr<WritableFile> xplane_file;
std::vector<std::string> paths = {absl::StrCat(path, "hostname.xplane.pb")};
auto xspace = std::make_unique<XSpace>();
XPlane* xplane = FindOrAddMutablePlaneWithName(xspace.get(), "/host:CPU");
if (has_dcn_collective_stats) {
XPlaneBuilder xplane_builder(xplane);
xplane_builder.GetOrCreateEventMetadata("MegaScale:");
}
if (create_cache_file) {
if (has_dcn_collective_stats) {
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "hostname.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "ALL_HOSTS.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
} else {
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "NO_HOST.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
}
}
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(xspace));
absl::StatusOr<SessionSnapshot> session_snapshot_status =
SessionSnapshot::Create(paths, std::move(xspaces));
TF_CHECK_OK(session_snapshot_status.status());
SessionSnapshot session_snapshot = std::move(session_snapshot_status.value());
if (has_dcn_collective_stats) {
DcnSlackAnalysis dcn_slack_analysis = CreateDcnSlackAnalysisProto();
TF_CHECK_OK(session_snapshot.WriteBinaryProto(
DCN_COLLECTIVE_STATS, "hostname", dcn_slack_analysis));
TF_CHECK_OK(session_snapshot.WriteBinaryProto(
DCN_COLLECTIVE_STATS, kAllHostsIdentifier, dcn_slack_analysis));
}
return session_snapshot;
}
TEST(ConvertXplaneToDcnCollectiveStats,
HasAllHostsDcnCollectiveStatsCacheFile) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(true, true);
absl::StatusOr<bool> status =
HasDcnCollectiveStatsInMultiXSpace(session_snapshot);
EXPECT_EQ(status.value(), true);
}
TEST(ConvertXplaneToDcnCollectiveStats, HasNoHostDcnCollectiveStatsCacheFile) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(true, false);
absl::StatusOr<bool> status =
HasDcnCollectiveStatsInMultiXSpace(session_snapshot);
EXPECT_EQ(status.value(), false);
}
TEST(ConvertXplaneToDcnCollectiveStats,
NoCacheFileButTraceHasDcnCollectiveStats) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, true);
absl::StatusOr<bool> status =
HasDcnCollectiveStatsInMultiXSpace(session_snapshot);
EXPECT_EQ(status.value(), true);
}
TEST(ConvertXplaneToDcnCollectiveStats,
NoCacheFileNoDcnCollectiveStatsPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, false);
absl::StatusOr<bool> status =
HasDcnCollectiveStatsInMultiXSpace(session_snapshot);
EXPECT_EQ(status.value(), false);
}
TEST(ConvertXplaneToDcnCollectiveStats,
ConvertXSpaceToDcnCollectiveStatsWhenStatsPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, true);
absl::StatusOr<bool> status =
ConvertMultiXSpaceToDcnCollectiveStats(session_snapshot);
absl::StatusOr<std::optional<std::string>> all_hosts_filepath =
session_snapshot.GetHostDataFilePath(StoredDataType::DCN_COLLECTIVE_STATS,
kAllHostsIdentifier);
absl::StatusOr<std::optional<std::string>> host_filepath =
session_snapshot.GetHostDataFilePath(StoredDataType::DCN_COLLECTIVE_STATS,
"hostname");
EXPECT_EQ(status.value(), true);
TF_EXPECT_OK(all_hosts_filepath.status());
EXPECT_TRUE(all_hosts_filepath.value().has_value());
EXPECT_FALSE(all_hosts_filepath.value().value().empty());
TF_EXPECT_OK(host_filepath.status());
EXPECT_TRUE(host_filepath.value().has_value());
EXPECT_FALSE(host_filepath.value().value().empty());
}
TEST(ConvertXplaneToDcnCollectiveStats,
ConvertXSpaceToDcnCollectiveStatsWhenStatsNotPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, false);
absl::StatusOr<bool> status =
ConvertMultiXSpaceToDcnCollectiveStats(session_snapshot);
absl::StatusOr<std::optional<std::string>> filepath =
session_snapshot.GetHostDataFilePath(StoredDataType::DCN_COLLECTIVE_STATS,
kNoHostIdentifier);
EXPECT_EQ(status.value(), false);
TF_EXPECT_OK(filepath.status());
EXPECT_TRUE(filepath.value().has_value());
EXPECT_FALSE(filepath.value().value().empty());
}
TEST(ConvertXplaneToDcnCollectiveStats,
GetHostDcnSlackAnalysisWhenStatsNotPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, false);
absl::StatusOr<DcnSlackAnalysis> host_dcn_slack_analysis =
GetDcnSlackAnalysisByHostName(session_snapshot, "hostname");
TF_EXPECT_OK(host_dcn_slack_analysis.status());
EXPECT_EQ(host_dcn_slack_analysis.value().dcn_slack_summary_size(), 0);
}
TEST(ConvertXplaneToDcnCollectiveStats,
GetHostDcnSlackAnalysisWhenStatsPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(true, true);
absl::StatusOr<DcnSlackAnalysis> host_dcn_slack_analysis =
GetDcnSlackAnalysisByHostName(session_snapshot, "hostname");
TF_EXPECT_OK(host_dcn_slack_analysis.status());
EXPECT_EQ(host_dcn_slack_analysis.value().dcn_slack_summary_size(), 1);
}
}
}
} |
1,471 | cpp | tensorflow/tensorflow | op_stats_to_pod_viewer | tensorflow/core/profiler/convert/op_stats_to_pod_viewer.cc | tensorflow/core/profiler/convert/op_stats_to_pod_viewer_test.cc |
#ifndef TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_TO_POD_VIEWER_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_TO_POD_VIEWER_H_
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/pod_viewer.pb.h"
namespace tensorflow {
namespace profiler {
PodViewerDatabase ConvertOpStatsToPodViewer(const OpStats& op_stats);
}
}
#endif
#include "tensorflow/core/profiler/convert/op_stats_to_pod_viewer.h"
#include <utility>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/profiler/convert/op_stats_to_pod_stats.h"
#include "tensorflow/core/profiler/protobuf/pod_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
namespace tensorflow {
namespace profiler {
namespace {
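// Regroups the flat list of pod stats records into a per-step map keyed by
// core id, consuming the records in the order ConvertOpStatsToPodStats
// produced them.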
PodStatsSequence ConvertOpStatsToPodStatsSequence(const OpStats& op_stats,
PodStatsDatabase pod_stats) {
PodStatsSequence result_db;
int i = 0;
for (const auto& step_sequence : op_stats.step_db().step_sequence()) {
PodStatsMap* pod_stats_map = result_db.add_pod_stats_map();
pod_stats_map->set_step_num(step_sequence.step_num());
for (const auto& entry : step_sequence.step_info_per_core()) {
PodStatsRecord& record =
(*pod_stats_map->mutable_pod_stats_per_core())[entry.first];
DCHECK_LE(i, pod_stats.pod_stats_record_size());
record = std::move(*pod_stats.mutable_pod_stats_record(i++));
}
}
return result_db;
}
}
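// Builds the pod viewer database: reuses the pod stats conversion, keeps its
// step-breakdown event list, groups the records by step and core, and copies
// the device type and step diagnostics from the OpStats.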
PodViewerDatabase ConvertOpStatsToPodViewer(const OpStats& op_stats) {
PodViewerDatabase database;
database.set_device_type(op_stats.run_environment().device_type());
PodStatsDatabase pod_stats = ConvertOpStatsToPodStats(op_stats);
database.mutable_step_breakdown_events()->Swap(
pod_stats.mutable_step_breakdown_events());
*database.mutable_pod_stats_sequence() =
ConvertOpStatsToPodStatsSequence(op_stats, std::move(pod_stats));
PopulateStepDiagnostics(op_stats, database.mutable_diagnostics());
return database;
}
}
} |
#include "tensorflow/core/profiler/convert/op_stats_to_pod_viewer.h"
#include "google/protobuf/any.pb.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/pod_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
const double kMaxError = 1e-6;
constexpr int kStepNum = 2;
constexpr int kCoreId = 1001;
constexpr int kStepTimePs = 1000;
constexpr int kHostComputePs = 50;
constexpr int kHostCompilePs = 50;
constexpr int kHostToHostPs = 50;
constexpr int kHostToDevicePs = 50;
constexpr int kHostPreparePs = 50;
constexpr int kDeviceCollectivePs = 350;
constexpr int kHostWaitInputPs = 50;
constexpr int kDeviceToDevicePs = 50;
constexpr int kDeviceToHostPs = 50;
constexpr int kDeviceCompute32Ps = 50;
constexpr int kDeviceCompute16Ps = 50;
constexpr int kDeviceWaitDevicePs = 50;
constexpr int kDeviceWaitHostPs = 50;
constexpr int kUnknownTimePs = 50;
static constexpr char kHostname[] = "host:123";
void CreateOpStats(OpStats* op_stats) {
PerCoreStepInfo* info = op_stats->mutable_step_db()->add_step_sequence();
info->set_step_num(kStepNum);
StepInfoResult& step_info = (*info->mutable_step_info_per_core())[kCoreId];
step_info.set_step_num(kStepNum);
step_info.set_duration_ps(kStepTimePs);
GenericStepBreakdown breakdown;
auto& type_ps = *breakdown.mutable_type_ps();
type_ps[HOST_COMPUTE] = kHostComputePs;
type_ps[HOST_COMPILE] = kHostCompilePs;
type_ps[HOST_TO_HOST] = kHostToHostPs;
type_ps[HOST_TO_DEVICE] = kHostToDevicePs;
type_ps[HOST_PREPARE] = kHostPreparePs;
type_ps[DEVICE_COLLECTIVES] = kDeviceCollectivePs;
type_ps[HOST_WAIT_INPUT] = kHostWaitInputPs;
type_ps[DEVICE_TO_DEVICE] = kDeviceToDevicePs;
type_ps[DEVICE_TO_HOST] = kDeviceToHostPs;
type_ps[DEVICE_COMPUTE_32] = kDeviceCompute32Ps;
type_ps[DEVICE_COMPUTE_16] = kDeviceCompute16Ps;
type_ps[DEVICE_WAIT_DEVICE] = kDeviceWaitDevicePs;
type_ps[DEVICE_WAIT_HOST] = kDeviceWaitHostPs;
type_ps[UNKNOWN_TIME] = kUnknownTimePs;
step_info.mutable_step_breakdown()->PackFrom(breakdown);
CoreDetails& details = (*op_stats->mutable_core_id_to_details())[kCoreId];
details.set_hostname(kHostname);
}
TEST(OpStatsToPodViewer, GpuPodViewer) {
OpStats op_stats;
CreateOpStats(&op_stats);
PodViewerDatabase pod_viewer_db = ConvertOpStatsToPodViewer(op_stats);
EXPECT_EQ(1, pod_viewer_db.pod_stats_sequence().pod_stats_map_size());
const PodStatsMap& pod_stats_map =
pod_viewer_db.pod_stats_sequence().pod_stats_map(0);
EXPECT_EQ(kStepNum, pod_stats_map.step_num());
const PodStatsRecord& record = pod_stats_map.pod_stats_per_core().at(kCoreId);
EXPECT_EQ(kStepNum, record.step_num());
EXPECT_EQ(kHostname, record.host_name());
EXPECT_NEAR(tsl::profiler::PicoToMicro(kStepTimePs),
record.total_duration_us(), kMaxError);
const auto& breakdown = record.step_breakdown_us();
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceCompute32Ps + kDeviceCompute16Ps),
breakdown.at(kDeviceCompute), kMaxError);
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceToDevicePs + kDeviceWaitDevicePs),
breakdown.at(kDeviceToDevice), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceCollectivePs),
breakdown.at(kDeviceCollectives), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostComputePs),
breakdown.at(kHostCompute), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostPreparePs),
breakdown.at(kHostPrepare), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostWaitInputPs + kHostToDevicePs +
kDeviceWaitHostPs),
breakdown.at(kInput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceToHostPs),
breakdown.at(kOutput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostCompilePs),
breakdown.at(kCompile), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kUnknownTimePs),
breakdown.at(kAllOthers), kMaxError);
EXPECT_EQ(GetGenericEventTypeStr(kDeviceCollectives), record.bottleneck());
}
TEST(OpStatsToPodViewer, Diagnostics) {
OpStats op_stats;
op_stats.mutable_step_db()->set_use_incomplete_step(true);
PodViewerDatabase pod_viewer_db = ConvertOpStatsToPodViewer(op_stats);
EXPECT_EQ(1, pod_viewer_db.diagnostics().warnings_size());
EXPECT_EQ(kErrorIncompleteStep, pod_viewer_db.diagnostics().warnings(0));
}
TEST(OpStatsToPodViewer, DeviceType) {
OpStats op_stats;
op_stats.mutable_run_environment()->set_device_type("GPU");
PodViewerDatabase pod_viewer_db = ConvertOpStatsToPodViewer(op_stats);
EXPECT_EQ("GPU", pod_viewer_db.device_type());
}
}
}
} |
1,472 | cpp | tensorflow/tensorflow | op_stats_to_tf_stats | tensorflow/core/profiler/convert/op_stats_to_tf_stats.cc | tensorflow/core/profiler/convert/op_stats_to_tf_stats_test.cc |
#ifndef TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_TO_TF_STATS_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_TO_TF_STATS_H_
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_stats.pb.h"
namespace tensorflow {
namespace profiler {
TfStatsDatabase ConvertOpStatsToTfStats(const OpStats& op_stats);
}
}
#endif
#include "tensorflow/core/profiler/convert/op_stats_to_tf_stats.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/convert/op_metrics_to_record.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_stats.pb.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
const int kMaxNumOfOps = 500;
TfStatsRecord ConvertOpMetricsToTfStatsRecord(
bool on_device, const OpMetrics& metrics,
double ridge_point_operational_intensity) {
TfStatsRecord record;
record.set_host_or_device(on_device ? "Device" : "Host");
record.set_is_eager(metrics.is_eager());
record.set_op_type(metrics.category());
record.set_op_name(metrics.name());
SetExecutionTimes(metrics, &record);
SetRooflineMetrics(metrics, ridge_point_operational_intensity, &record);
return record;
}
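// Emits up to kMaxNumOfOps device rows followed by up to kMaxNumOfOps host
// rows, each sorted by self time. Device rows also get a Tensor Core
// utilization ratio derived from the kernel stats; ranks and cumulative time
// fractions are chained from row to row through `prev_record`.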
TfStatsTable GenerateTfStatsTable(
const OpMetricsDb& host_tf_metrics_db,
const OpMetricsDb& device_tf_metrics_db,
const KernelStatsByOpName& kernel_stats_by_op_name, double ridge_point,
bool exclude_idle) {
TfStatsTable tf_stats_table;
TfStatsRecord sentinel;
sentinel.set_rank(0);
sentinel.set_device_cumulative_total_self_time_as_fraction(0.0);
sentinel.set_host_cumulative_total_self_time_as_fraction(0.0);
const TfStatsRecord* prev_record = &sentinel;
uint64 total_device_time_ps = TotalTimePs(device_tf_metrics_db, exclude_idle);
double total_device_time_us =
tsl::profiler::PicoToMicro(total_device_time_ps);
for (const OpMetrics* metrics :
SortedOpMetricsDb(device_tf_metrics_db, kMaxNumOfOps)) {
if (exclude_idle && IsIdleOp(*metrics)) continue;
TfStatsRecord* record = tf_stats_table.add_tf_stats_record();
*record = ConvertOpMetricsToTfStatsRecord(
true, *metrics, ridge_point);
auto iter = kernel_stats_by_op_name.find(record->op_name());
if (iter != kernel_stats_by_op_name.end()) {
record->set_gpu_tensorcore_utilization(
tsl::profiler::SafeDivide(iter->second.tensor_core_duration_ns,
iter->second.total_duration_ns));
} else {
record->set_gpu_tensorcore_utilization(0.0);
}
SetRankAndDeviceTimeFractions(total_device_time_us, *prev_record, record);
prev_record = record;
}
uint64 total_host_time_ps = TotalTimePs(host_tf_metrics_db, exclude_idle);
double total_host_time_us = tsl::profiler::PicoToMicro(total_host_time_ps);
for (const OpMetrics* metrics : tensorflow::profiler::SortedOpMetricsDb(
host_tf_metrics_db, kMaxNumOfOps)) {
if (exclude_idle && IsIdleOp(*metrics)) continue;
TfStatsRecord* record = tf_stats_table.add_tf_stats_record();
*record = ConvertOpMetricsToTfStatsRecord(
false, *metrics, ridge_point);
record->set_gpu_tensorcore_utilization(0.0);
SetRankAndHostTimeFractions(total_host_time_us, *prev_record, record);
prev_record = record;
}
return tf_stats_table;
}
}
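// Produces both TF-stats tables (with and without IDLE time) from an OpStats,
// after regrouping the device metrics by TensorFlow op instead of by kernel.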
TfStatsDatabase ConvertOpStatsToTfStats(const OpStats& op_stats) {
const OpMetricsDb& host_tf_metrics_db = op_stats.host_op_metrics_db();
OpMetricsDb device_tf_metrics_db =
CreateTfMetricsDbFromDeviceOpMetricsDb(op_stats.device_op_metrics_db());
double ridge_point = op_stats.perf_env().ridge_point();
KernelStatsByOpName kernel_stats_by_op_name =
GroupKernelReportsByOpName(op_stats.kernel_stats_db());
TfStatsDatabase tf_stats_db;
*tf_stats_db.mutable_with_idle() = GenerateTfStatsTable(
host_tf_metrics_db, device_tf_metrics_db, kernel_stats_by_op_name,
ridge_point, false);
*tf_stats_db.mutable_without_idle() = GenerateTfStatsTable(
host_tf_metrics_db, device_tf_metrics_db, kernel_stats_by_op_name,
ridge_point, true);
tf_stats_db.set_device_type(op_stats.run_environment().device_type());
return tf_stats_db;
}
}
} |
#include "tensorflow/core/profiler/convert/op_stats_to_tf_stats.h"
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/convert/xplane_to_op_stats.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
XEventBuilder AddTensorFlowOpEvent(std::string&& tf_op_fullname,
int64_t start_timestamp_ns,
int64_t duration_ns, bool on_device,
absl::string_view kernel_name,
XPlaneBuilder* plane, XLineBuilder* line) {
absl::string_view name = on_device ? kernel_name : tf_op_fullname;
XEventBuilder event = line->AddEvent(*plane->GetOrCreateEventMetadata(name));
event.SetTimestampNs(start_timestamp_ns);
event.SetDurationNs(duration_ns);
if (!on_device) return event;
event.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
*plane->GetOrCreateStatMetadata(std::move(tf_op_fullname)));
return event;
}
void AddTensorFlowOpEventWithKernelDetails(std::string&& tf_op_fullname,
int64_t start_timestamp_ns,
int64_t duration_ns, bool on_device,
absl::string_view kernel_name,
absl::string_view kernel_details,
XPlaneBuilder* plane,
XLineBuilder* line) {
XEventBuilder event =
AddTensorFlowOpEvent(std::move(tf_op_fullname), start_timestamp_ns,
duration_ns, on_device, kernel_name, plane, line);
if (!on_device) return;
event.ParseAndAddStatValue(*plane->GetOrCreateStatMetadata("kernel_details"),
kernel_details);
}
TEST(OpStatsToTfStats, GpuTfStats) {
static constexpr char kTfOp1[] = "TfOp1";
static constexpr char kTfOp2[] = "TfOp2";
static constexpr char kTfOp3[] = "Conv2D";
static constexpr char kKernel1[] = "kernel1";
static constexpr char kKernel2[] = "kernel2";
static constexpr char kKernel3[] = "kernel3";
static constexpr char kKernel4[] = "volta_fp16_s884gemm";
static constexpr char kKernel5[] = "kernel5";
constexpr int64_t kKernel1StartNs = 100000;
constexpr int64_t kKernel1DurationNs = 8000;
constexpr int64_t kKernel2StartNs = 110000;
constexpr int64_t kKernel2DurationNs = 10000;
constexpr int64_t kKernel3StartNs = 120000;
constexpr int64_t kKernel3DurationNs = 10000;
constexpr int64_t kKernel4StartNs = 130000;
constexpr int64_t kKernel4DurationNs = 10000;
constexpr int64_t kKernel5StartNs = 150000;
constexpr int64_t kKernel5DurationNs = 10000;
const std::string kKernelDetails = R"MULTI(regs:32
static_shared:0
dynamic_shared:16384
grid:2,1,1
block:32,1,1
occ_pct:100)MULTI";
XSpace space;
XPlaneBuilder device_plane(
GetOrCreateGpuXPlane(&space, 0));
XLineBuilder stream1 = device_plane.GetOrCreateLine(10);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel1StartNs,
kKernel1DurationNs, true, kKernel1,
&device_plane, &stream1);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel2StartNs,
kKernel2DurationNs, true, kKernel2,
&device_plane, &stream1);
XLineBuilder stream2 = device_plane.GetOrCreateLine(20);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel1StartNs,
kKernel1DurationNs, true, kKernel1,
&device_plane, &stream2);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel2StartNs,
kKernel2DurationNs, true, kKernel2,
&device_plane, &stream2);
AddTensorFlowOpEvent(absl::StrCat(kTfOp2, ":", kTfOp2), kKernel3StartNs,
kKernel3DurationNs, true, kKernel3,
&device_plane, &stream2);
AddTensorFlowOpEventWithKernelDetails(
absl::StrCat(kTfOp3, ":", kTfOp3), kKernel4StartNs, kKernel4DurationNs,
true, kKernel4, kKernelDetails, &device_plane, &stream2);
AddTensorFlowOpEventWithKernelDetails(
absl::StrCat(kTfOp3, ":", kTfOp3), kKernel5StartNs, kKernel5DurationNs,
true, kKernel5, kKernelDetails, &device_plane, &stream2);
OpStatsOptions options;
options.generate_kernel_stats_db = true;
options.generate_op_metrics_db = true;
const OpStats op_stats = ConvertXSpaceToOpStats(space, options);
const TfStatsDatabase tf_stats = ConvertOpStatsToTfStats(op_stats);
EXPECT_EQ(tf_stats.device_type(), op_stats.run_environment().device_type());
EXPECT_EQ(4, tf_stats.with_idle().tf_stats_record_size());
const TfStatsRecord& record_0 = tf_stats.with_idle().tf_stats_record(0);
EXPECT_EQ(kTfOp1, record_0.op_name());
EXPECT_EQ(kTfOp1, record_0.op_type());
EXPECT_EQ(2, record_0.occurrences());
EXPECT_EQ(tsl::profiler::NanoToMicro(kKernel1DurationNs) * 2 +
tsl::profiler::NanoToMicro(kKernel2DurationNs) * 2,
record_0.total_self_time_in_us());
const TfStatsRecord& record_1 = tf_stats.with_idle().tf_stats_record(1);
EXPECT_EQ(kTfOp3, record_1.op_name());
EXPECT_EQ(kTfOp3, record_1.op_type());
EXPECT_EQ(1, record_1.occurrences());
EXPECT_EQ(tsl::profiler::NanoToMicro(kKernel4DurationNs) +
tsl::profiler::NanoToMicro(kKernel5DurationNs),
record_1.total_self_time_in_us());
EXPECT_DOUBLE_EQ(0.5, record_1.gpu_tensorcore_utilization());
const TfStatsRecord& record_2 = tf_stats.with_idle().tf_stats_record(2);
EXPECT_EQ(kTfOp2, record_2.op_name());
EXPECT_EQ(kTfOp2, record_2.op_type());
EXPECT_EQ(1, record_2.occurrences());
EXPECT_EQ(tsl::profiler::NanoToMicro(kKernel3DurationNs),
record_2.total_self_time_in_us());
}
}
}
} |
1,473 | cpp | tensorflow/tensorflow | xplane_to_tf_functions | tensorflow/core/profiler/convert/xplane_to_tf_functions.cc | tensorflow/core/profiler/convert/xplane_to_tf_functions_test.cc |
#ifndef TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_TF_FUNCTIONS_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_TF_FUNCTIONS_H_
#include <string>
#include "tensorflow/core/profiler/protobuf/tf_function.pb.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
TfFunctionDb ConvertHostThreadsXLineToTfFunctionDb(const XLineVisitor& line);
std::string DebugString(TfFunctionDb tf_function_db);
void CombineTfFunctionDb(const TfFunctionDb& src, TfFunctionDb* dst);
}
}
#endif
#include "tensorflow/core/profiler/convert/xplane_to_tf_functions.h"
#include <algorithm>
#include <ostream>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
#include "tsl/profiler/utils/timespan.h"
namespace tensorflow {
namespace profiler {
namespace {
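// Maps the execution-mode string recorded on a tf.function host event to an
// (execution mode, compiler) pair; unknown strings are logged and reported as
// INVALID_MODE / INVALID_COMPILER.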
std::pair<TfFunctionExecutionMode, TfFunctionCompiler> Decode(
absl::string_view function_name, absl::string_view mode) {
if (mode == "eager") return {EAGER_MODE, INVALID_COMPILER};
if (mode == "concrete") return {CONCRETE_MODE, INVALID_COMPILER};
if (mode == "traced-xla") return {TRACED_MODE, XLA_COMPILER};
if (mode == "traced-nonXla") return {TRACED_MODE, OTHER_COMPILER};
if (mode == "notTraced-xla") return {NOT_TRACED_MODE, XLA_COMPILER};
if (mode == "notTraced-nonXla") return {NOT_TRACED_MODE, OTHER_COMPILER};
LOG(ERROR) << absl::StrCat("tf-function '", function_name,
"' has an unexpected execution mode '", mode, "'")
<< std::endl;
return {INVALID_MODE, INVALID_COMPILER};
DCHECK(false);
}
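// Percentage of this tf.function's total self time spent in the expensive
// execution modes (TRACED_MODE and EAGER_MODE).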
double ComputeExpensiveCallPercent(const TfFunction& tf_function) {
uint64 total_call_time_ps = 0;
uint64 expensive_call_time_ps = 0;
for (const auto& mode_metrics : tf_function.metrics()) {
const auto mode = mode_metrics.first;
const auto& metrics = mode_metrics.second;
total_call_time_ps += metrics.self_time_ps();
if (mode == TRACED_MODE || mode == EAGER_MODE) {
expensive_call_time_ps += metrics.self_time_ps();
}
}
return tsl::profiler::SafeDivide(100.0 * expensive_call_time_ps,
total_call_time_ps);
}
struct ActivationRecord {
std::string function_name;
tsl::profiler::Timespan timespan;
TfFunctionExecutionMode execution_mode;
TfFunctionCompiler compiler;
int64_t tracing_count;
uint64 children_duration_ps;
ActivationRecord()
: function_name(""),
execution_mode(INVALID_MODE),
compiler(INVALID_COMPILER),
tracing_count(0),
children_duration_ps(0) {}
ActivationRecord(absl::string_view name,
const tsl::profiler::Timespan& timespan,
TfFunctionExecutionMode exe_mode,
TfFunctionCompiler compiler, int64_t tracing_cnt)
: function_name(std::string(name)),
timespan(timespan),
execution_mode(exe_mode),
compiler(compiler),
tracing_count(tracing_cnt),
children_duration_ps(0) {}
std::string DebugString() const {
return absl::StrCat("{", function_name, ", ",
TfFunctionExecutionMode_Name(execution_mode), ", ",
TfFunctionCompiler_Name(compiler),
", tracing_count:", tracing_count,
", children_duration:", children_duration_ps,
" ps, timespan:", timespan.DebugString(), "}");
}
};
struct EntryOrExit {
bool is_entry;
int64_t index;
uint64 timestamp_ps;
EntryOrExit() : is_entry(false), index(-1), timestamp_ps(0) {}
EntryOrExit(bool is_entry, int64_t index, uint64 timestamp_ps)
: is_entry(is_entry), index(index), timestamp_ps(timestamp_ps) {}
std::string DebugString() const {
std::string entry_or_exit = is_entry ? "entry, " : "exit, ";
return absl::StrCat("{", entry_or_exit, "idx:", index,
", timestamp:", timestamp_ps, "}");
}
};
TfFunctionCompiler CombineCompilers(TfFunctionCompiler a,
TfFunctionCompiler b) {
if (a == INVALID_COMPILER) return b;
if (b == INVALID_COMPILER) return a;
if (a == b) return a;
return MIXED_COMPILER;
}
void CombineTfFunctionMetrics(const TfFunctionMetrics& src,
TfFunctionMetrics* dst) {
dst->set_count(src.count() + dst->count());
dst->set_self_time_ps(src.self_time_ps() + dst->self_time_ps());
}
void CombineTfFunction(const TfFunction& src, TfFunction* dst) {
dst->set_total_tracing_count(
std::max(src.total_tracing_count(), dst->total_tracing_count()));
dst->set_compiler(CombineCompilers(src.compiler(), dst->compiler()));
for (const auto& mode_metrics : src.metrics()) {
int32_t execution_mode = mode_metrics.first;
const TfFunctionMetrics& src_metrics = mode_metrics.second;
TfFunctionMetrics* dst_metrics =
gtl::FindOrNull(*dst->mutable_metrics(), execution_mode);
if (dst_metrics == nullptr) {
(*dst->mutable_metrics())[execution_mode] = src_metrics;
} else {
CombineTfFunctionMetrics(src_metrics, dst_metrics);
}
}
dst->set_expensive_call_percent(ComputeExpensiveCallPercent(*dst));
}
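// Gathers every tf.function call event on one host-thread line, sorts their
// entry/exit points by timestamp, and turns them into per-function metrics
// where self time excludes time spent in nested tf.function calls.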
class TfFunctionExecutions {
public:
explicit TfFunctionExecutions(const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
absl::string_view mode;
int64_t tracing_count = 0;
event.ForEachStat([&mode, &tracing_count](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (stat.Type().value()) {
case StatType::kTfFunctionCall:
mode = stat.StrOrRefValue();
break;
case StatType::kTfFunctionTracingCount:
tracing_count = stat.IntValue();
break;
}
});
if (mode.empty()) return;
int64_t index = activations_.size();
auto timespan = event.GetTimespan();
auto mode_compiler = Decode(event.Name(), mode);
ActivationRecord activation_record =
ActivationRecord(event.Name(), timespan, mode_compiler.first,
mode_compiler.second, tracing_count);
activations_.push_back(activation_record);
EntryOrExit entry_point =
EntryOrExit(true, index, timespan.begin_ps());
EntryOrExit exit_point =
EntryOrExit(false, index, timespan.end_ps());
points_.push_back(entry_point);
points_.push_back(exit_point);
});
auto ascending_in_timestamp = [](const EntryOrExit& a,
const EntryOrExit& b) {
return a.timestamp_ps < b.timestamp_ps;
};
absl::c_sort(points_, ascending_in_timestamp);
CalculateChildrenDurations();
}
std::string DebugString() const {
std::string result = "\nActivations:\n";
for (int i = 0, end = activations_.size(); i < end; i++) {
absl::StrAppend(&result, "[", i, "] ", activations_[i].DebugString(),
"\n");
}
absl::StrAppend(&result, "tf-function Entry/Exit Points:\n");
for (const auto& pt : points_) {
absl::StrAppend(&result, pt.DebugString(), "\n");
}
return result;
}
TfFunctionDb ConvertToTfFunctionDb() {
TfFunctionDb result;
for (const auto& record : activations_) {
TfFunction* fun = &(*result.mutable_tf_functions())[record.function_name];
fun->set_total_tracing_count(
std::max(static_cast<int64_t>(fun->total_tracing_count()),
record.tracing_count));
fun->set_compiler(CombineCompilers(fun->compiler(), record.compiler));
uint64 self_time_ps =
record.timespan.duration_ps() - record.children_duration_ps;
TfFunctionMetrics* metrics =
&(*fun->mutable_metrics())[record.execution_mode];
metrics->set_count(metrics->count() + 1);
metrics->set_self_time_ps(metrics->self_time_ps() + self_time_ps);
}
for (auto& name_fun : *result.mutable_tf_functions()) {
TfFunction& fun = name_fun.second;
fun.set_expensive_call_percent(ComputeExpensiveCallPercent(fun));
}
return result;
}
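// Replays the time-sorted entry/exit points with an explicit call stack so
// each activation learns how much of its duration was spent in nested calls.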
void CalculateChildrenDurations() {
std::stack<int64_t> call_stack;
for (const auto& pt : points_) {
if (pt.is_entry) {
call_stack.push(pt.index);
} else {
DCHECK(call_stack.top() == pt.index);
uint64 call_duration = activations_[pt.index].timespan.duration_ps();
call_stack.pop();
if (!call_stack.empty()) {
activations_[call_stack.top()].children_duration_ps += call_duration;
}
}
}
}
private:
std::vector<ActivationRecord> activations_;
std::vector<EntryOrExit> points_;
};
}
std::string DebugString(const TfFunctionDb& tf_function_db) {
std::string str;
protobuf::TextFormat::PrintToString(tf_function_db, &str);
return str;
}
void CombineTfFunctionDb(const TfFunctionDb& src, TfFunctionDb* dst) {
for (const auto& name_function : src.tf_functions()) {
const auto& name = name_function.first;
const auto& src_fun = name_function.second;
TfFunction* dst_fun = gtl::FindOrNull(*dst->mutable_tf_functions(), name);
if (dst_fun == nullptr) {
(*dst->mutable_tf_functions())[name] = src_fun;
} else {
CombineTfFunction(src_fun, dst_fun);
}
}
}
TfFunctionDb ConvertHostThreadsXLineToTfFunctionDb(const XLineVisitor& line) {
TfFunctionExecutions tf_function_executions = TfFunctionExecutions(line);
return tf_function_executions.ConvertToTfFunctionDb();
}
}
} |
#include "tensorflow/core/profiler/convert/xplane_to_tf_functions.h"
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/tf_function.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
const absl::string_view kEager = "eager";
const absl::string_view kConcrete = "concrete";
const absl::string_view kTracedNonXla = "traced-nonXla";
const absl::string_view kTracedXla = "traced-xla";
const absl::string_view kNotTracedNonXla = "notTraced-nonXla";
const absl::string_view kNotTracedXla = "notTraced-xla";
constexpr double kMaxError = 0.001;
TfFunctionDb ConvertXSpaceToTfFunctionDb(const XSpace& space) {
TfFunctionDb result;
const XPlane* host_plane = FindPlaneWithName(space, kHostThreadsPlaneName);
if (host_plane) {
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_plane);
plane.ForEachLine([&result](const XLineVisitor& line) {
TfFunctionDb tf_function_db = ConvertHostThreadsXLineToTfFunctionDb(line);
CombineTfFunctionDb(tf_function_db, &result);
});
}
return result;
}
TEST(ConvertXPlaneToTfFunctions, CombineTwoThreads) {
XSpace space;
XPlaneBuilder host_plane_builder(space.add_planes());
host_plane_builder.SetName(kHostThreadsPlaneName);
host_plane_builder.ReserveLines(2);
std::string kFunctionName = "decrement";
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread, kFunctionName,
10, 100, kTracedNonXla, 1);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread, kFunctionName,
150, 20, kNotTracedNonXla, 2);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread, kFunctionName,
200, 80, kTracedNonXla, 3);
auto other_thread = host_plane_builder.GetOrCreateLine(1);
CreateTfFunctionCallEvent(&host_plane_builder, &other_thread, kFunctionName,
20, 100, kTracedNonXla, 2);
CreateTfFunctionCallEvent(&host_plane_builder, &other_thread, kFunctionName,
160, 20, kNotTracedNonXla, 2);
CreateTfFunctionCallEvent(&host_plane_builder, &other_thread, kFunctionName,
210, 80, kTracedXla, 4);
TfFunctionDb tf_function_db = ConvertXSpaceToTfFunctionDb(space);
EXPECT_EQ(tf_function_db.tf_functions().size(), 1);
EXPECT_EQ(tf_function_db.tf_functions().count(kFunctionName), 1);
const TfFunction& tf_function =
tf_function_db.tf_functions().at(kFunctionName);
EXPECT_EQ(tf_function.total_tracing_count(), 4);
EXPECT_EQ(tf_function.compiler(), MIXED_COMPILER);
EXPECT_NEAR(tf_function.expensive_call_percent(), 90, kMaxError);
const auto& metrics = tf_function.metrics();
EXPECT_EQ(metrics.size(), 2);
EXPECT_EQ(metrics.count(TRACED_MODE), 1);
EXPECT_EQ(metrics.count(NOT_TRACED_MODE), 1);
const auto& traced_mode = metrics.at(TRACED_MODE);
EXPECT_EQ(traced_mode.count(), 4);
EXPECT_EQ(traced_mode.self_time_ps(), 360);
const auto& not_traced_mode = metrics.at(NOT_TRACED_MODE);
EXPECT_EQ(not_traced_mode.count(), 2);
EXPECT_EQ(not_traced_mode.self_time_ps(), 40);
}
TEST(ConvertXPlaneToTfFunctions, NestedFunctions) {
XSpace space;
XPlaneBuilder host_plane_builder(space.add_planes());
host_plane_builder.SetName(kHostThreadsPlaneName);
host_plane_builder.ReserveLines(1);
std::string kOuterFunctionName = "outer";
std::string kInnerFunctionName = "inner";
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread,
kOuterFunctionName, 10, 100, kTracedNonXla, 1);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread,
kInnerFunctionName, 30, 40, kNotTracedXla, 0);
TfFunctionDb tf_function_db = ConvertXSpaceToTfFunctionDb(space);
EXPECT_EQ(tf_function_db.tf_functions().size(), 2);
EXPECT_EQ(tf_function_db.tf_functions().count(kOuterFunctionName), 1);
EXPECT_EQ(tf_function_db.tf_functions().count(kInnerFunctionName), 1);
const TfFunction& outer =
tf_function_db.tf_functions().at(kOuterFunctionName);
EXPECT_EQ(outer.total_tracing_count(), 1);
EXPECT_EQ(outer.compiler(), OTHER_COMPILER);
EXPECT_NEAR(outer.expensive_call_percent(), 100, kMaxError);
const auto& outer_metrics = outer.metrics();
EXPECT_EQ(outer_metrics.size(), 1);
EXPECT_EQ(outer_metrics.count(TRACED_MODE), 1);
const auto& traced_mode = outer_metrics.at(TRACED_MODE);
EXPECT_EQ(traced_mode.count(), 1);
EXPECT_EQ(traced_mode.self_time_ps(), 60);
const TfFunction& inner =
tf_function_db.tf_functions().at(kInnerFunctionName);
EXPECT_EQ(inner.total_tracing_count(), 0);
EXPECT_EQ(inner.compiler(), XLA_COMPILER);
EXPECT_NEAR(inner.expensive_call_percent(), 0, kMaxError);
const auto& inner_metrics = inner.metrics();
EXPECT_EQ(inner_metrics.size(), 1);
EXPECT_EQ(inner_metrics.count(NOT_TRACED_MODE), 1);
const auto& not_traced_mode = inner_metrics.at(NOT_TRACED_MODE);
EXPECT_EQ(not_traced_mode.count(), 1);
EXPECT_EQ(not_traced_mode.self_time_ps(), 40);
}
TEST(ConvertXPlaneToTfFunctions, EagerPlusConcrete) {
XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(2);
std::string kEagerFunctionName = "i_am_eager";
std::string kConcreteFunctionName = "i_am_concrete";
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread,
kEagerFunctionName, 10, 200, kEager);
auto other_thread = host_plane_builder.GetOrCreateLine(1);
CreateTfFunctionCallEvent(&host_plane_builder, &other_thread,
kConcreteFunctionName, 20, 40, kConcrete);
TfFunctionDb tf_function_db = ConvertXSpaceToTfFunctionDb(space);
EXPECT_EQ(tf_function_db.tf_functions().size(), 2);
EXPECT_EQ(tf_function_db.tf_functions().count(kEagerFunctionName), 1);
EXPECT_EQ(tf_function_db.tf_functions().count(kConcreteFunctionName), 1);
const TfFunction& eager =
tf_function_db.tf_functions().at(kEagerFunctionName);
EXPECT_EQ(eager.total_tracing_count(), 0);
EXPECT_EQ(eager.compiler(), INVALID_COMPILER);
EXPECT_NEAR(eager.expensive_call_percent(), 100, kMaxError);
const auto& eager_metrics = eager.metrics();
EXPECT_EQ(eager_metrics.size(), 1);
EXPECT_EQ(eager_metrics.count(EAGER_MODE), 1);
const auto& eager_mode = eager_metrics.at(EAGER_MODE);
EXPECT_EQ(eager_mode.count(), 1);
EXPECT_EQ(eager_mode.self_time_ps(), 200);
const TfFunction& concrete =
tf_function_db.tf_functions().at(kConcreteFunctionName);
EXPECT_EQ(concrete.total_tracing_count(), 0);
EXPECT_EQ(concrete.compiler(), INVALID_COMPILER);
EXPECT_NEAR(concrete.expensive_call_percent(), 0, kMaxError);
const auto& concrete_metrics = concrete.metrics();
EXPECT_EQ(concrete_metrics.size(), 1);
EXPECT_EQ(concrete_metrics.count(CONCRETE_MODE), 1);
const auto& concrete_mode = concrete_metrics.at(CONCRETE_MODE);
EXPECT_EQ(concrete_mode.count(), 1);
EXPECT_EQ(concrete_mode.self_time_ps(), 40);
}
}
}
} |
1,474 | cpp | tensorflow/tensorflow | xplane_to_step_events | tensorflow/core/profiler/convert/xplane_to_step_events.cc | tensorflow/core/profiler/convert/xplane_to_step_events_test.cc |
#ifndef TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_STEP_EVENTS_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_STEP_EVENTS_H_
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
StepEvents ConvertHostThreadsXLineToStepEvents(
const XLineVisitor& line, const StepEvents* device_step_events);
StepEvents ConvertHostThreadsXPlaneToStepEvents(
const XPlane& host_trace, const StepEvents* device_step_events);
StepEvents ConvertDeviceTraceXLineToStepEvents(const XLineVisitor& line);
StepEvents ConvertDeviceTraceXPlaneToStepEvents(const XPlane& device_trace);
}
}
#endif
#include "tensorflow/core/profiler/convert/xplane_to_step_events.h"
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/utils/tf_op_utils.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
#include "tsl/profiler/utils/timespan.h"
#include "tsl/profiler/utils/tpu_xplane_utils.h"
#include "tsl/profiler/utils/xplane_schema.h"
namespace tensorflow {
namespace profiler {
namespace {
inline bool IsExplicitHostStepMarker(absl::string_view event_name) {
return (absl::StartsWith(event_name, "train") ||
absl::StartsWith(event_name, "test") ||
absl::StartsWith(event_name, "TraceContext")) &&
!absl::StrContains(event_name, "/");
}
inline bool IsRealCpuCompute(absl::string_view event_name) {
bool not_real = absl::StartsWith(event_name, "EagerExecute") ||
absl::StartsWith(event_name, "EagerLocalExecute") ||
absl::StartsWith(event_name, "EagerKernelExecute") ||
absl::StartsWith(event_name, "FunctionRun") ||
IsExplicitHostStepMarker(event_name);
return !not_real;
}
uint64 ParseNumBytesFromMemcpyDetail(absl::string_view memcpy_detail) {
const std::vector<absl::string_view> params =
absl::StrSplit(memcpy_detail, absl::ByAnyChar(":\n"));
for (uint32 ii = 0; ii < params.size(); ii += 2) {
if (params[ii] != "num_bytes") continue;
uint64 value = 0;
if (absl::SimpleAtoi(params[ii + 1], &value)) return value;
break;
}
return 0ULL;
}
EventType ClassifyGpuCompute(absl::string_view event_name,
absl::string_view tensor_shapes) {
if (tensor_shapes.empty()) {
return (absl::StrContains(event_name, "half") ||
absl::StrContains(event_name, "fp16"))
? DEVICE_COMPUTE_16
: DEVICE_COMPUTE_32;
} else {
return (absl::StrContains(tensor_shapes, "half")) ? DEVICE_COMPUTE_16
: DEVICE_COMPUTE_32;
}
}
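// Classifies a device event: memcpy ops map to the corresponding transfer
// types, kernels whose names start with "nccl" become DEVICE_COLLECTIVES, and
// everything else is fp16 or fp32 device compute.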
EventType ClassifyGpuEvent(absl::string_view event_name,
absl::string_view tensor_shapes) {
tsl::profiler::TfOp tf_op = tsl::profiler::ParseTfOpFullname(event_name);
if (tsl::profiler::IsMemcpyHToDOp(tf_op)) {
return HOST_TO_DEVICE;
} else if (tsl::profiler::IsMemcpyDToHOp(tf_op)) {
return DEVICE_TO_HOST;
} else if (tsl::profiler::IsMemcpyDToDOp(tf_op)) {
return DEVICE_TO_DEVICE;
} else if (absl::StartsWithIgnoreCase(event_name, "nccl")) {
return DEVICE_COLLECTIVES;
} else {
return ClassifyGpuCompute(event_name, tensor_shapes);
}
}
EventType ClassifyCpuEvent(absl::string_view event_name, bool has_device,
bool has_correlation_id) {
tsl::profiler::TfOp tf_op = tsl::profiler::ParseTfOpFullname(event_name);
if (tsl::profiler::IsInfeedEnqueueOp(tf_op) ||
tsl::profiler::IsMemcpyHToDOp(tf_op)) {
return HOST_TO_DEVICE;
} else if (tsl::profiler::IsMemcpyHToHOp(tf_op)) {
return HOST_TO_HOST;
} else if (has_device && (has_correlation_id ||
absl::StartsWithIgnoreCase(
event_name, "ExecutorState::Process"))) {
return HOST_PREPARE;
} else if (absl::StartsWithIgnoreCase(event_name, "IteratorGetNext")) {
return HOST_WAIT_INPUT;
} else {
return HOST_COMPUTE;
}
}
}
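// Converts one host-thread line into per-step events keyed by group id. When
// device step events are supplied, host activity is only attributed to steps
// that also appear on the device.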
StepEvents ConvertHostThreadsXLineToStepEvents(
const XLineVisitor& line, const StepEvents* device_step_events) {
StepEvents result;
line.ForEachEvent([&](const XEventVisitor& event) {
int64_t correlation_id = -1;
int64_t group_id = -1;
absl::string_view step_name;
event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (stat.Type().value()) {
case StatType::kCorrelationId:
correlation_id = stat.IntValue();
break;
case StatType::kGroupId:
group_id = stat.IntValue();
break;
case StatType::kStepName:
step_name = stat.StrOrRefValue();
break;
}
});
if (group_id < 0) return;
bool has_device = (device_step_events != nullptr);
if (has_device && !device_step_events->contains(group_id)) return;
if (IsExplicitHostStepMarker(event.Name())) {
result[group_id].AddMarker(
StepMarker(StepMarkerType::kExplicitHostStepMarker, event.Name(),
event.GetTimespan()));
} else if (!step_name.empty()) {
result[group_id].AddMarker(
StepMarker(StepMarkerType::kImplicitHostStepMarker, event.Name(),
event.GetTimespan()));
} else if (IsRealCpuCompute(event.Name())) {
result[group_id].AddEvent(EventTypeSpan(
ClassifyCpuEvent(event.Name(), has_device, correlation_id >= 0),
event.GetTimespan()));
}
if (!step_name.empty()) {
result[group_id].SetStepName(std::string(step_name));
}
});
return result;
}
StepEvents ConvertHostThreadsXPlaneToStepEvents(
const XPlane& host_trace, const StepEvents* device_step_events) {
StepEvents host_step_events;
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&host_trace);
plane.ForEachLine([&](const XLineVisitor& line) {
StepEvents thread_step_events =
ConvertHostThreadsXLineToStepEvents(line, device_step_events);
UnionCombineStepEvents(thread_step_events, &host_step_events);
});
return host_step_events;
}
StepEvents ConvertDeviceStepInfoToStepMarkers(const XLineVisitor& line) {
StepEvents result;
line.ForEachEvent([&](const XEventVisitor& event) {
if (std::optional<XStatVisitor> stat = event.GetStat(StatType::kGroupId)) {
result[stat->IntValue()].AddMarker(
StepMarker(StepMarkerType::kDeviceStepMarker, event.Name(),
event.GetTimespan()));
}
});
return result;
}
StepEvents ConvertDeviceTraceXLineToStepEvents(const uint64 device_id,
const XLineVisitor& line) {
StepEvents result;
line.ForEachEvent([&](const XEventVisitor& event) {
int64_t correlation_id = -1;
int64_t group_id = -1;
absl::string_view tensor_shapes;
absl::string_view memcpy_details;
event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (stat.Type().value()) {
case StatType::kCorrelationId:
correlation_id = stat.IntValue();
break;
case StatType::kGroupId:
group_id = stat.IntValue();
break;
case StatType::kTensorShapes:
tensor_shapes = stat.StrOrRefValue();
break;
case StatType::kMemcpyDetails:
memcpy_details = stat.StrOrRefValue();
break;
}
});
if (correlation_id >= 0 && group_id >= 0) {
EventType event_type = ClassifyGpuEvent(event.Name(), tensor_shapes);
EventTypeSpan event_type_span(event_type, event.GetTimespan());
result[group_id].AddEvent(event_type_span);
switch (event_type) {
case DEVICE_COLLECTIVES: {
AllReduceInfo collective_ops;
collective_ops.set_start_time_ps(event.TimestampPs());
collective_ops.set_end_time_ps(event.EndOffsetPs());
result[group_id].AddCollectiveOpEvent(device_id, collective_ops);
break;
}
case HOST_TO_DEVICE:
case DEVICE_TO_DEVICE:
case DEVICE_TO_HOST: {
uint64 bytes_transferred =
ParseNumBytesFromMemcpyDetail(memcpy_details);
result[group_id].AddDeviceMemoryTransferEvent(
event_type, event.GetTimespan(), bytes_transferred);
break;
}
default:
return;
}
}
});
return result;
}
StepEvents ConvertTpuDeviceTraceXLineToStepEvents(const uint64 device_id,
const XLineVisitor& line) {
StepEvents result;
absl::flat_hash_map<int64_t , XEventsOpMetricsDbBuilder>
op_metrics_builder;
line.ForEachEvent([&](const XEventVisitor& event) {
auto group_id = event.GetStat(StatType::kGroupId);
if (!group_id.has_value()) return;
op_metrics_builder[group_id->IntOrUintValue()].AddOpMetric(event);
});
for (auto& [group_id, builder] : op_metrics_builder) {
result[group_id].SetPerCoreOpMetricsDb(builder.Finalize(), device_id);
}
return result;
}
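// Converts a whole device plane: the step-info line (or the TPU step line)
// yields device step markers, derived lines are skipped, and every remaining
// stream line goes through the GPU or TPU per-line conversion.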
StepEvents ConvertDeviceTraceXPlaneToStepEvents(const XPlane& device_trace) {
StepEvents device_step_events;
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&device_trace);
std::optional<int> tpu_core_id = tsl::profiler::GetTensorCoreId(plane.Name());
plane.ForEachLine([&](const XLineVisitor& line) {
int64_t line_id = line.Id();
if (line_id == kThreadIdStepInfo ||
(tpu_core_id.has_value() &&
line.Name() == tsl::profiler::kStepLineName)) {
StepEvents step_marker_events = ConvertDeviceStepInfoToStepMarkers(line);
UnionCombineStepEvents(step_marker_events, &device_step_events);
} else if (IsDerivedThreadId(line_id)) {
return;
} else {
StepEvents stream_step_events;
if (!tpu_core_id.has_value()) {
stream_step_events =
ConvertDeviceTraceXLineToStepEvents(plane.Id(), line);
} else {
stream_step_events =
ConvertTpuDeviceTraceXLineToStepEvents(tpu_core_id.value(), line);
}
UnionCombineStepEvents(stream_step_events, &device_step_events);
}
});
return device_step_events;
}
}
} |
#include "tensorflow/core/profiler/convert/xplane_to_step_events.h"
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
#include "tsl/profiler/utils/group_events.h"
namespace tensorflow {
namespace profiler {
namespace {
TEST(ConvertXPlaneToOpStats, CpuOnlyStepDbTest) {
constexpr int64_t kFirstStepNum = 123;
constexpr int64_t kSecondStepNum = 456;
constexpr int64_t kFirstStepId = 0;
constexpr int64_t kSecondStepId = 1;
constexpr int64_t kFirstCorrelationId = 100;
constexpr int64_t kSecondCorrelationId = 200;
XSpace space;
XPlane* host_plane = GetOrCreateHostXPlane(&space);
XPlaneBuilder host_plane_builder(host_plane);
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kFirstStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kFirstStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kFirstStepId}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
300, 100, {{StatType::kStepNum, kSecondStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
310, 90,
{{StatType::kStepId, kSecondStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kSecondStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 20,
{{StatType::kStepId, kFirstStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kFirstStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 10,
{{StatType::kCorrelationId, kFirstCorrelationId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 320, 20,
{{StatType::kStepId, kSecondStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kSecondStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 330, 10,
{{StatType::kCorrelationId, kSecondCorrelationId}});
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 50, 40,
{{StatType::kCorrelationId, kFirstCorrelationId}});
tsl::profiler::GroupTfEvents(&space);
StepEvents device_step_events =
ConvertDeviceTraceXPlaneToStepEvents(*device_plane);
EXPECT_EQ(device_step_events.size(), 1);
EXPECT_EQ(device_step_events[0].Events().size(), 1);
StepEvents host_step_events =
ConvertHostThreadsXPlaneToStepEvents(*host_plane, &device_step_events);
EXPECT_EQ(host_step_events.size(), 1);
EXPECT_EQ(host_step_events[0].Markers().size(), 1);
EXPECT_EQ(host_step_events[0].Events().size(), 2);
}
}
}
} |
1,475 | cpp | tensorflow/tensorflow | xplane_to_memory_profile | tensorflow/core/profiler/convert/xplane_to_memory_profile.cc | tensorflow/core/profiler/convert/xplane_to_memory_profile_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_MEMORY_PROFILE_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_MEMORY_PROFILE_H_
#include <string>
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
MemoryProfile ConvertXPlaneToMemoryProfile(const XPlane& host_plane,
int64_t max_num_snapshots = 1000);
Status ConvertXSpaceToMemoryProfileJson(const XSpace& xspace,
std::string* json_output);
}
}
#endif
#include "tensorflow/core/profiler/convert/xplane_to_memory_profile.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
constexpr int64_t kInvalidStepId = -1;
using IndexMetaPair =
    std::pair<int64_t /*index*/, const MemoryActivityMetadata*>;
bool IsMemoryAllocation(int64_t event_type) {
return event_type == HostEventType::kMemoryAllocation;
}
bool IsMemoryDeallocation(int64_t event_type) {
return event_type == HostEventType::kMemoryDeallocation;
}
void UpdateProfileSummary(const MemoryAggregationStats& stats,
int64_t time_offset_ps,
MemoryProfileSummary* summary) {
summary->set_peak_bytes_usage_lifetime(stats.peak_bytes_in_use());
MemoryAggregationStats* peak_stats = summary->mutable_peak_stats();
if (stats.stack_reserved_bytes() + stats.heap_allocated_bytes() >=
peak_stats->peak_bytes_in_use()) {
*peak_stats = stats;
peak_stats->set_peak_bytes_in_use(stats.stack_reserved_bytes() +
stats.heap_allocated_bytes());
summary->set_peak_stats_time_ps(time_offset_ps);
summary->set_memory_capacity(stats.stack_reserved_bytes() +
stats.heap_allocated_bytes() +
stats.free_memory_bytes());
}
}
MemoryProfile GenerateMemoryProfile(const XPlane* host_trace) {
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
MemoryProfile memory_profile;
plane.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
int64_t event_type =
event.Type().value_or(HostEventType::kUnknownHostEventType);
if (!(IsMemoryAllocation(event_type) ||
IsMemoryDeallocation(event_type))) {
return;
}
MemoryAggregationStats stats;
MemoryActivityMetadata metadata;
if (IsMemoryAllocation(event_type)) {
metadata.set_memory_activity(ALLOCATION);
} else if (IsMemoryDeallocation(event_type)) {
metadata.set_memory_activity(DEALLOCATION);
}
metadata.set_step_id(kInvalidStepId);
std::string memory_id;
event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (stat.Type().value()) {
case StatType::kIndexOnHost:
case StatType::kDeviceOrdinal:
memory_id = absl::StrCat(stat.IntValue());
break;
case StatType::kAllocatorName:
memory_id = std::string(stat.StrOrRefValue());
break;
case StatType::kBytesReserved:
stats.set_stack_reserved_bytes(stat.IntValue());
break;
case StatType::kBytesAllocated:
stats.set_heap_allocated_bytes(stat.IntValue());
break;
case StatType::kBytesAvailable:
stats.set_free_memory_bytes(stat.IntValue());
break;
case StatType::kFragmentation:
stats.set_fragmentation(stat.DoubleValue());
break;
case StatType::kPeakBytesInUse:
stats.set_peak_bytes_in_use(stat.IntValue());
break;
case StatType::kRequestedBytes:
metadata.set_requested_bytes(stat.IntValue());
break;
case StatType::kAllocationBytes:
metadata.set_allocation_bytes(stat.IntValue());
break;
case StatType::kAddress:
metadata.set_address(stat.IntValue());
break;
case StatType::kTfOp:
metadata.set_tf_op_name(std::string(stat.StrOrRefValue()));
break;
case StatType::kGroupId:
metadata.set_step_id(stat.IntValue());
break;
case StatType::kRegionType:
metadata.set_region_type(std::string(stat.StrOrRefValue()));
break;
case StatType::kDataType:
metadata.set_data_type(tensorflow::DataTypeString(
static_cast<tensorflow::DataType>(stat.IntValue())));
break;
case StatType::kTensorShapes:
metadata.set_tensor_shape(std::string(stat.StrOrRefValue()));
break;
}
});
MemoryProfileSummary* summary =
(*memory_profile.mutable_memory_profile_per_allocator())[memory_id]
.mutable_profile_summary();
UpdateProfileSummary(stats, event.OffsetPs(), summary);
MemoryProfileSnapshot* snapshot =
(*memory_profile.mutable_memory_profile_per_allocator())[memory_id]
.add_memory_profile_snapshots();
snapshot->set_time_offset_ps(event.OffsetPs());
*snapshot->mutable_aggregation_stats() = std::move(stats);
*snapshot->mutable_activity_metadata() = std::move(metadata);
});
});
return memory_profile;
}
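// Assigns a step id to snapshots recorded without one: each such snapshot
// gets one past the last valid step id seen so far (0 if none yet).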
void UpdateStepId(PerAllocatorMemoryProfile* memory_profile) {
int64_t last_valid_step_id = -1;
for (auto& snapshot : *memory_profile->mutable_memory_profile_snapshots()) {
DCHECK(snapshot.has_activity_metadata());
if (snapshot.mutable_activity_metadata()->step_id() == kInvalidStepId) {
snapshot.mutable_activity_metadata()->set_step_id(last_valid_step_id + 1);
} else {
last_valid_step_id = snapshot.mutable_activity_metadata()->step_id();
}
}
}
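// Pairs each deallocation with the earlier allocation at the same address and
// copies the allocation's op name, region type, data type and tensor shape
// onto it; unmatched deallocations are only logged.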
void UpdateDeallocation(PerAllocatorMemoryProfile* memory_profile) {
  absl::flat_hash_map<uint64 /*address*/, const MemoryActivityMetadata*>
addr_metadata_map;
for (auto& snapshot : *memory_profile->mutable_memory_profile_snapshots()) {
uint64 address = snapshot.activity_metadata().address();
if (snapshot.activity_metadata().memory_activity() == DEALLOCATION) {
if (addr_metadata_map.contains(address)) {
const MemoryActivityMetadata* alloc_meta = addr_metadata_map[address];
snapshot.mutable_activity_metadata()->set_tf_op_name(
alloc_meta->tf_op_name());
snapshot.mutable_activity_metadata()->set_region_type(
alloc_meta->region_type());
snapshot.mutable_activity_metadata()->set_data_type(
alloc_meta->data_type());
snapshot.mutable_activity_metadata()->set_tensor_shape(
alloc_meta->tensor_shape());
addr_metadata_map.erase(address);
} else {
VLOG(2)
<< "Can't find matching memory allocation for this deallocation: "
<< snapshot.DebugString();
}
} else if (!addr_metadata_map.contains(address)) {
addr_metadata_map[address] = &snapshot.activity_metadata();
} else {
VLOG(2) << "There are two allocations recorded for the same address: "
<< address
<< ". The later allocation event is: " << snapshot.DebugString();
}
}
VLOG(2) << "Number of allocations that cannot find matching dealloctions: "
<< addr_metadata_map.size();
}
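// Returns the step id of the last snapshot whose heap + stack bytes in use
// equal the given peak value.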
int64_t GetPeakMemoryStep(int64_t peak_bytes_profile,
const PerAllocatorMemoryProfile* memory_profile) {
int64_t peak_bytes_profile_step_id = 0;
for (const auto& snapshot : memory_profile->memory_profile_snapshots()) {
if (peak_bytes_profile ==
snapshot.aggregation_stats().heap_allocated_bytes() +
snapshot.aggregation_stats().stack_reserved_bytes()) {
DCHECK(snapshot.has_activity_metadata());
peak_bytes_profile_step_id = snapshot.activity_metadata().step_id();
}
}
return peak_bytes_profile_step_id;
}
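// Orders allocations so that larger ones (by allocation bytes, then requested
// bytes) come first; ties are broken by op name, region type, data type and
// tensor shape.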
struct MetadataComparator {
bool operator()(const IndexMetaPair& a, const IndexMetaPair& b) const {
const MemoryActivityMetadata* a_meta = a.second;
const MemoryActivityMetadata* b_meta = b.second;
DCHECK_NE(a_meta, nullptr);
DCHECK_NE(b_meta, nullptr);
auto lhs =
std::make_tuple(-a_meta->allocation_bytes(), -a_meta->requested_bytes(),
a_meta->tf_op_name(), a_meta->region_type(),
a_meta->data_type(), a_meta->tensor_shape());
auto rhs =
std::make_tuple(-b_meta->allocation_bytes(), -b_meta->requested_bytes(),
b_meta->tf_op_name(), b_meta->region_type(),
b_meta->data_type(), b_meta->tensor_shape());
return lhs < rhs;
}
};
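// Adds synthetic allocations for memory in use at the peak that has no
// recorded snapshot: unused preallocated device memory and reserved stack
// bytes. They get negative indices to distinguish them from real snapshots.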
void InsertSpecialAllocations(int64_t unmapped_allocation_bytes,
int64_t step_id,
PerAllocatorMemoryProfile* memory_profile,
std::vector<IndexMetaPair>* active_allocs) {
int index = 0;
if (unmapped_allocation_bytes > 0) {
MemoryActivityMetadata* special_allocation =
memory_profile->add_special_allocations();
special_allocation->set_memory_activity(ALLOCATION);
special_allocation->set_requested_bytes(unmapped_allocation_bytes);
special_allocation->set_allocation_bytes(unmapped_allocation_bytes);
special_allocation->set_address(0);
special_allocation->set_tf_op_name("unused preallocated device memory");
special_allocation->set_step_id(step_id);
special_allocation->set_region_type("persist/dynamic");
special_allocation->set_data_type(
tensorflow::DataTypeString(static_cast<tensorflow::DataType>(0)));
special_allocation->set_tensor_shape("unknown");
active_allocs->push_back({--index, special_allocation});
}
int64_t stack_bytes =
memory_profile->profile_summary().peak_stats().stack_reserved_bytes();
if (stack_bytes > 0) {
MemoryActivityMetadata* special_allocation =
memory_profile->add_special_allocations();
special_allocation->set_memory_activity(ALLOCATION);
special_allocation->set_requested_bytes(stack_bytes);
special_allocation->set_allocation_bytes(stack_bytes);
special_allocation->set_address(0);
special_allocation->set_tf_op_name("stack");
special_allocation->set_step_id(step_id);
special_allocation->set_region_type("stack");
special_allocation->set_data_type(
tensorflow::DataTypeString(static_cast<tensorflow::DataType>(0)));
special_allocation->set_tensor_shape("unknown");
active_allocs->push_back({--index, special_allocation});
}
}
bool operator==(const IndexMetaPair& a, const IndexMetaPair& b) {
const MemoryActivityMetadata* a_meta = a.second;
const MemoryActivityMetadata* b_meta = b.second;
return a_meta->allocation_bytes() == b_meta->allocation_bytes() &&
a_meta->requested_bytes() == b_meta->requested_bytes() &&
a_meta->tf_op_name() == b_meta->tf_op_name() &&
a_meta->region_type() == b_meta->region_type() &&
a_meta->data_type() == b_meta->data_type() &&
a_meta->tensor_shape() == b_meta->tensor_shape();
}
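// Reconstructs the allocations still live at the peak-memory time of the peak
// step and records them as ActiveAllocations, merging identical allocations
// into a single entry with an occurrence count.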
void ProcessActiveAllocations(int64_t peak_bytes_profile_step_id,
PerAllocatorMemoryProfile* memory_profile) {
int64_t unmapped_allocation_bytes =
memory_profile->profile_summary().peak_stats().heap_allocated_bytes();
int64_t unmapped_deallocation_bytes = 0;
  absl::flat_hash_map<int64_t /*address*/, IndexMetaPair> active_alloc_map;
for (int i = 0; i < memory_profile->memory_profile_snapshots_size(); i++) {
const auto& snapshot = memory_profile->memory_profile_snapshots().at(i);
DCHECK(snapshot.has_activity_metadata());
const MemoryActivityMetadata& metadata = snapshot.activity_metadata();
if (snapshot.time_offset_ps() >
memory_profile->profile_summary().peak_stats_time_ps())
break;
if (metadata.step_id() != peak_bytes_profile_step_id) continue;
if (metadata.memory_activity() == ALLOCATION) {
active_alloc_map[metadata.address()] = {i, &metadata};
unmapped_allocation_bytes -= metadata.allocation_bytes();
} else {
DCHECK_EQ(metadata.memory_activity(), DEALLOCATION);
if (active_alloc_map.contains(metadata.address())) {
active_alloc_map.erase(metadata.address());
} else {
unmapped_deallocation_bytes += metadata.allocation_bytes();
}
unmapped_allocation_bytes += metadata.allocation_bytes();
}
}
unmapped_allocation_bytes -= unmapped_deallocation_bytes;
VLOG(2) << "unmapped_allocation_bytes=" << unmapped_allocation_bytes
<< ", unmapped_deallocation_bytes=" << unmapped_deallocation_bytes;
std::vector<IndexMetaPair> active_allocs;
for (const auto& address_and_index_meta : active_alloc_map) {
active_allocs.push_back(address_and_index_meta.second);
}
InsertSpecialAllocations(unmapped_allocation_bytes,
peak_bytes_profile_step_id, memory_profile,
&active_allocs);
std::sort(active_allocs.begin(), active_allocs.end(), MetadataComparator());
for (int i = 0, end = active_allocs.size(); i < end; i++) {
ActiveAllocation* allocation = memory_profile->add_active_allocations();
allocation->set_snapshot_index(active_allocs[i].first);
if (active_allocs[i].first < 0) {
allocation->set_special_index(-active_allocs[i].first - 1);
} else {
allocation->set_special_index(-1);
}
allocation->set_num_occurrences(1);
const int last_alloc = active_allocs.size() - 1;
while (i < last_alloc && active_allocs[i] == active_allocs[i + 1]) {
allocation->set_num_occurrences(allocation->num_occurrences() + 1);
i++;
}
}
VLOG(2) << "Distinctive active allocation count="
<< memory_profile->active_allocations_size();
}
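// Keeps only the snapshots referenced by active allocations and rewrites each
// allocation's snapshot_index to point into the pruned snapshot list.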
void SaveActiveAllocationSnapshots(
protobuf::RepeatedPtrField<MemoryProfileSnapshot>* snapshots,
protobuf::RepeatedPtrField<ActiveAllocation>* active_allocations) {
std::vector<MemoryProfileSnapshot*> samples;
for (const auto& allocation : *active_allocations) {
auto orig_index = allocation.snapshot_index();
if (orig_index < 0) continue;
samples.push_back(&(*snapshots)[orig_index]);
}
int new_index = 0;
for (auto& allocation : *active_allocations) {
int64_t origin_index = allocation.snapshot_index();
if (origin_index < 0) continue;
allocation.set_snapshot_index(new_index);
new_index++;
}
protobuf::RepeatedPtrField<MemoryProfileSnapshot> new_snapshots;
new_snapshots.Reserve(samples.size());
for (const auto& sample : samples) {
*new_snapshots.Add() = std::move(*sample);
}
*snapshots = std::move(new_snapshots);
}
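// Downsamples the timeline to at most max_num_snapshots entries by keeping,
// within each window, the snapshot with the largest heap + stack usage
// (a max box filter).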
void SampleMemoryProfileTimeline(int64_t max_num_snapshots,
PerAllocatorMemoryProfile* memory_profile) {
const protobuf::RepeatedPtrField<MemoryProfileSnapshot>& original_snapshots =
memory_profile->memory_profile_snapshots();
protobuf::RepeatedPtrField<MemoryProfileSnapshot>* timeline_snapshots =
memory_profile->mutable_sampled_timeline_snapshots();
int64_t snapshot_count = original_snapshots.size();
if (snapshot_count > max_num_snapshots) {
auto max_box_filter = [&](int filter_width, int count, int start) {
for (int i = 0; i < count; i++) {
const MemoryProfileSnapshot* max_snapshot =
&original_snapshots[start + filter_width * i];
int64_t max_bytes =
max_snapshot->aggregation_stats().heap_allocated_bytes() +
max_snapshot->aggregation_stats().stack_reserved_bytes();
for (int index = start + filter_width * i + 1;
index < start + filter_width * (i + 1); index++) {
int64_t bytes = original_snapshots[index]
.aggregation_stats()
.heap_allocated_bytes() +
original_snapshots[index]
.aggregation_stats()
.stack_reserved_bytes();
if (bytes > max_bytes) {
max_snapshot = &original_snapshots[index];
max_bytes = bytes;
}
}
*timeline_snapshots->Add() = *max_snapshot;
}
};
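    // Cover every snapshot exactly once: count1 windows of `width` snapshots
    // followed by count2 windows of `width + 1` snapshots.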
int width = snapshot_count / max_num_snapshots;
int count1 = max_num_snapshots * (width + 1) - snapshot_count;
int count2 = max_num_snapshots - count1;
max_box_filter(width, count1, 0);
max_box_filter(width + 1, count2, width * count1);
} else {
*timeline_snapshots = original_snapshots;
}
}
void ProcessMemoryProfileProto(int64_t max_num_snapshots,
MemoryProfile* memory_profile) {
memory_profile->set_num_hosts(1);
for (const auto& id_and_allocator_profile :
memory_profile->memory_profile_per_allocator()) {
if (!id_and_allocator_profile.second.memory_profile_snapshots().empty()) {
memory_profile->add_memory_ids(id_and_allocator_profile.first);
}
}
absl::c_sort(*memory_profile->mutable_memory_ids());
for (auto& id_and_allocator_profile :
*memory_profile->mutable_memory_profile_per_allocator()) {
PerAllocatorMemoryProfile* allocator_memory_profile =
&id_and_allocator_profile.second;
protobuf::RepeatedPtrField<MemoryProfileSnapshot>* snapshots =
allocator_memory_profile->mutable_memory_profile_snapshots();
absl::c_sort(*snapshots, [](const MemoryProfileSnapshot& a,
const MemoryProfileSnapshot& b) {
return a.time_offset_ps() < b.time_offset_ps();
});
UpdateStepId(allocator_memory_profile);
UpdateDeallocation(allocator_memory_profile);
SampleMemoryProfileTimeline(max_num_snapshots, allocator_memory_profile);
int64_t peak_step_id =
GetPeakMemoryStep(allocator_memory_profile->profile_summary()
.peak_stats()
.peak_bytes_in_use(),
allocator_memory_profile);
ProcessActiveAllocations(peak_step_id, allocator_memory_profile);
SaveActiveAllocationSnapshots(
snapshots, allocator_memory_profile->mutable_active_allocations());
}
}
template <typename Proto>
Status ConvertProtoToJson(const Proto& proto_output, std::string* json_output) {
protobuf::util::JsonPrintOptions json_options;
json_options.always_print_primitive_fields = true;
auto status = protobuf::util::MessageToJsonString(proto_output, json_output,
json_options);
if (!status.ok()) {
auto error_msg = status.message();
return errors::Internal(
"Could not convert proto to JSON string: ",
absl::string_view(error_msg.data(), error_msg.length()));
}
return absl::OkStatus();
}
}
MemoryProfile ConvertXPlaneToMemoryProfile(const XPlane& host_plane,
int64_t max_num_snapshots) {
MemoryProfile memory_profile = GenerateMemoryProfile(&host_plane);
ProcessMemoryProfileProto(max_num_snapshots, &memory_profile);
memory_profile.set_version(1);
return memory_profile;
}
Status ConvertXSpaceToMemoryProfileJson(const XSpace& xspace,
std::string* json_output) {
if (const XPlane* host_plane =
FindPlaneWithName(xspace, kHostThreadsPlaneName)) {
MemoryProfile memory_profile = ConvertXPlaneToMemoryProfile(*host_plane);
TF_RETURN_IF_ERROR(ConvertProtoToJson(memory_profile, json_output));
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_memory_profile.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
#include "tsl/profiler/utils/group_events.h"
namespace tensorflow {
namespace profiler {
namespace {
TEST(ConvertXPlaneToMemoryProfile, OneAllocatorMultiActivitiesTest) {
XSpace space;
XPlane* host_plane = GetOrCreateHostXPlane(&space);
XPlaneBuilder host_plane_builder(host_plane);
host_plane_builder.ReserveLines(1);
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &tf_executor_thread, "MemoryAllocation",
40000, 1000,
{{StatType::kBytesReserved, int64_t{2000}},
{StatType::kBytesAllocated, int64_t{3000}},
{StatType::kBytesAvailable, int64_t{5000}},
{StatType::kPeakBytesInUse, int64_t{8500}},
{StatType::kRequestedBytes, int64_t{200}},
{StatType::kAllocationBytes, int64_t{256}},
{StatType::kAddress, int64_t{222333}},
{StatType::kStepId, int64_t{-93746}},
{StatType::kDataType, int64_t{1}},
{StatType::kAllocatorName, "GPU_0_bfc"},
{StatType::kTfOp, "foo/bar"},
{StatType::kRegionType, "output"},
{StatType::kTensorShapes, "[3, 3, 512, 512]"}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "MemoryDeallocation",
50000, 1000,
{{StatType::kBytesReserved, int64_t{2000}},
{StatType::kBytesAllocated, int64_t{2744}},
{StatType::kBytesAvailable, int64_t{5256}},
{StatType::kPeakBytesInUse, int64_t{8500}},
{StatType::kRequestedBytes, int64_t{200}},
{StatType::kAllocationBytes, int64_t{256}},
{StatType::kAddress, int64_t{222333}},
{StatType::kStepId, int64_t{0}},
{StatType::kDataType, int64_t{0}},
{StatType::kAllocatorName, "GPU_0_bfc"},
{StatType::kRegionType, ""},
{StatType::kTensorShapes, ""}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "MemoryAllocation",
70000, 1000,
{{StatType::kBytesReserved, int64_t{2000}},
{StatType::kBytesAllocated, int64_t{5000}},
{StatType::kBytesAvailable, int64_t{3000}},
{StatType::kPeakBytesInUse, int64_t{9500}},
{StatType::kRequestedBytes, int64_t{300}},
{StatType::kAllocationBytes, int64_t{300}},
{StatType::kAddress, int64_t{345678}},
{StatType::kStepId, int64_t{-93746}},
{StatType::kDataType, int64_t{9}},
{StatType::kAllocatorName, "GPU_0_bfc"},
{StatType::kTfOp, "mul_grad/Sum"},
{StatType::kRegionType, "temp"},
{StatType::kTensorShapes, "[1, 2]"}});
tsl::profiler::GroupTfEvents(&space);
MemoryProfile memory_profile = ConvertXPlaneToMemoryProfile(*host_plane);
EXPECT_EQ(memory_profile.memory_profile_per_allocator().size(), 1);
EXPECT_EQ(memory_profile.num_hosts(), 1);
EXPECT_EQ(memory_profile.memory_ids_size(), 1);
EXPECT_EQ(memory_profile.memory_profile_per_allocator().begin()->first,
"GPU_0_bfc");
EXPECT_EQ(memory_profile.version(), 1);
const auto& allocator_memory_profile =
memory_profile.memory_profile_per_allocator().begin()->second;
EXPECT_EQ(
allocator_memory_profile.profile_summary().peak_bytes_usage_lifetime(),
9500);
EXPECT_EQ(allocator_memory_profile.profile_summary()
.peak_stats()
.peak_bytes_in_use(),
7000);
EXPECT_EQ(allocator_memory_profile.profile_summary().peak_stats_time_ps(),
70000);
EXPECT_EQ(allocator_memory_profile.sampled_timeline_snapshots_size(), 3);
EXPECT_EQ(allocator_memory_profile.memory_profile_snapshots_size(), 1);
EXPECT_EQ(allocator_memory_profile.memory_profile_snapshots()
.at(0)
.activity_metadata()
.tf_op_name(),
"mul_grad/Sum");
EXPECT_EQ(allocator_memory_profile.active_allocations_size(), 3);
EXPECT_EQ(
allocator_memory_profile.active_allocations().at(2).snapshot_index(), 0);
EXPECT_EQ(allocator_memory_profile.special_allocations_size(), 2);
EXPECT_EQ(allocator_memory_profile.special_allocations().at(1).tf_op_name(),
"stack");
EXPECT_EQ(
allocator_memory_profile.special_allocations().at(1).allocation_bytes(),
2000);
}
}
}
} |
1,476 | cpp | tensorflow/tensorflow | dcn_utils | tensorflow/core/profiler/convert/dcn_utils.cc | tensorflow/core/profiler/convert/dcn_utils_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_DCN_UTILS_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_DCN_UTILS_H_
#include <string>
#include "tsl/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
enum DcnMessageValidity {
DCN_MESSAGE_VALID = 1,
DCN_MESSAGE_VALID_LOOPBACK = 2,
DCN_MESSAGE_INVALID_CLOCK_SKEW = 3,
DCN_MESSAGE_INVALID_BAD_KEY = 4
};
struct DcnMessage {
std::string collective_name = "";
int32_t slice_src = -1;
int32_t tpu_src = -1;
int32_t slice_dst = -1;
int32_t tpu_dst = -1;
uint64_t start_timestamp_ns = 0;
uint64_t end_timestamp_ns = 0;
uint64_t duration_us = 0;
size_t size_bytes = 0;
int32_t chunk_id = -1;
int32_t loop_index_id = -1;
DcnMessageValidity validity_info = DCN_MESSAGE_INVALID_BAD_KEY;
};
DcnMessage GetDcnMessageFromXEvent(
const tsl::profiler::XEventVisitor& event_visitor);
bool IsDcnEvent(const tsl::profiler::XEventVisitor& event);
}
}
#endif
#include "tensorflow/core/profiler/convert/dcn_utils.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "tsl/profiler/utils/xplane_schema.h"
#include "tsl/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::MicroToNano;
using tsl::profiler::StatType;
using tsl::profiler::XEventVisitor;
using tsl::profiler::XStatVisitor;
DcnMessage CreateDcnMessageFromStats(const XEventVisitor& event_visitor) {
DcnMessage dcn_message;
event_visitor.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type()) return;
switch (static_cast<StatType>(*stat.Type())) {
case StatType::kDcnLabel: {
dcn_message.collective_name = stat.ToString();
break;
}
case StatType::kDcnSourceSliceId: {
dcn_message.slice_src = stat.IntValue();
break;
}
case StatType::kDcnSourcePerSliceDeviceId: {
dcn_message.tpu_src = stat.IntValue();
break;
}
case StatType::kDcnDestinationSliceId: {
dcn_message.slice_dst = stat.IntValue();
break;
}
case StatType::kDcnDestinationPerSliceDeviceId: {
dcn_message.tpu_dst = stat.IntValue();
break;
}
case StatType::kDcnChunk: {
dcn_message.chunk_id = stat.IntValue();
break;
}
case StatType::kDcnLoopIndex: {
dcn_message.loop_index_id = stat.IntValue();
break;
}
case StatType::kPayloadSizeBytes: {
dcn_message.size_bytes = stat.IntValue();
break;
}
case StatType::kDuration: {
dcn_message.duration_us = stat.IntOrUintValue();
dcn_message.start_timestamp_ns =
event_visitor.TimestampNs() - MicroToNano(dcn_message.duration_us);
dcn_message.end_timestamp_ns = event_visitor.TimestampNs();
break;
}
default:
break;
}
});
return dcn_message;
}
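// Classifies the message: a missing collective name or routing field makes it
// invalid, a zero duration indicates clock skew, and identical source and
// destination slices mark loopback traffic; everything else is valid.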
void SetMessageValidity(DcnMessage& dcn_message) {
if (dcn_message.collective_name.empty() || dcn_message.slice_src == -1 ||
dcn_message.tpu_src == -1 || dcn_message.slice_dst == -1 ||
dcn_message.tpu_dst == -1 || dcn_message.size_bytes == -1) {
dcn_message.validity_info = DCN_MESSAGE_INVALID_BAD_KEY;
} else if (dcn_message.duration_us == 0) {
dcn_message.validity_info = DCN_MESSAGE_INVALID_CLOCK_SKEW;
} else if (dcn_message.slice_src == dcn_message.slice_dst) {
dcn_message.validity_info = DCN_MESSAGE_VALID_LOOPBACK;
} else {
dcn_message.validity_info = DCN_MESSAGE_VALID;
}
}
}
DcnMessage GetDcnMessageFromXEvent(const XEventVisitor& event_visitor) {
DcnMessage dcn_message = CreateDcnMessageFromStats(event_visitor);
SetMessageValidity(dcn_message);
return dcn_message;
}
bool IsDcnEvent(const tsl::profiler::XEventVisitor& event) {
return absl::StartsWith(event.Name(), "MegaScale:");
}
}
} | #include "tensorflow/core/profiler/convert/dcn_utils.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tsl/profiler/utils/tf_xplane_visitor.h"
#include "tsl/profiler/utils/xplane_builder.h"
#include "tsl/profiler/utils/xplane_schema.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::kMegaScaleDcnReceive;
using tsl::profiler::XEventBuilder;
using tsl::profiler::XEventVisitor;
using tsl::profiler::XLineBuilder;
using tsl::profiler::XPlaneBuilder;
using tsl::profiler::XPlaneVisitor;
void PopulateXPlane(XPlane &xplane, absl::string_view event_name, int offset,
absl::string_view label, int64_t source_slice_id,
int64_t source_per_slice_device_id,
int64_t destination_slice_id,
int64_t destination_per_slice_device_id, int64_t chunk,
int64_t loop_index, int64_t payload_size,
int64_t duration) {
XPlaneBuilder xplane_builder(&xplane);
XEventMetadata *event_metadata = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata->set_name(std::string(event_name));
XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata);
event_builder.SetOffsetNs(offset);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"), label);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"),
source_slice_id);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
source_per_slice_device_id);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"),
destination_slice_id);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
destination_per_slice_device_id);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), chunk);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), loop_index);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), duration);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"),
payload_size);
}
TEST(DcnUtilsTest, IsDcnEvent) {
XPlane xplane;
PopulateXPlane(xplane, kMegaScaleDcnReceive, 0, "test", 0, 0, 0, 0, 0, 0, 0,
0);
XLine line = xplane.lines()[0];
XPlaneVisitor xplane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
XEventVisitor visitor(&xplane_visitor, &line, &line.events()[0]);
EXPECT_TRUE(IsDcnEvent(visitor));
}
TEST(DcnUtilsTest, IsNotDcnEvent) {
XPlane xplane;
PopulateXPlane(xplane, "test", 0, "test", 0, 0, 0, 0, 0, 0, 0, 0);
XLine line = xplane.lines()[0];
XPlaneVisitor xplane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
XEventVisitor visitor(&xplane_visitor, &line, &line.events()[0]);
EXPECT_FALSE(IsDcnEvent(visitor));
}
TEST(DcnUtilsTest, GetDcnMessageFromXEvent) {
XPlane xplane;
PopulateXPlane(xplane, kMegaScaleDcnReceive, 100000, "all-reduce.273_312", 2,
3, 1, 3, 0, 24, 32768, 50);
XPlaneVisitor xplane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
XEventVisitor visitor(&xplane_visitor, &xplane.lines()[0],
&xplane.lines()[0].events()[0]);
EXPECT_THAT(GetDcnMessageFromXEvent(visitor),
testing::FieldsAre(
"all-reduce.273_312",
2, 3, 1, 3,
50000, 100000, 50,
32768, 0, 24,
DCN_MESSAGE_VALID));
}
TEST(DcnUtilsTest, GetDcnMessageFromXEventLoopBack) {
XPlane xplane;
PopulateXPlane(xplane, kMegaScaleDcnReceive, 5000000, "all-gather.1234", 2, 3,
2, 1, 4, 40, 1000, 1000);
XPlaneVisitor xplane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
XEventVisitor visitor(&xplane_visitor, &xplane.lines()[0],
&xplane.lines()[0].events()[0]);
EXPECT_THAT(GetDcnMessageFromXEvent(visitor),
testing::FieldsAre(
"all-gather.1234",
2, 3, 2, 1,
4000000, 5000000, 1000,
1000, 4, 40,
DCN_MESSAGE_VALID_LOOPBACK));
}
}
}
} |
1,477 | cpp | tensorflow/tensorflow | trace_events_to_json | third_party/xla/third_party/tsl/tsl/profiler/convert/trace_events_to_json.cc | third_party/xla/third_party/tsl/tsl/profiler/convert/trace_events_to_json_test.cc | #ifndef TENSORFLOW_TSL_PROFILER_CONVERT_TRACE_EVENTS_TO_JSON_H_
#define TENSORFLOW_TSL_PROFILER_CONVERT_TRACE_EVENTS_TO_JSON_H_
#include <string>
#include "tsl/platform/types.h"
#include "tsl/profiler/convert/trace_container.h"
namespace tsl {
namespace profiler {
std::string TraceContainerToJson(const TraceContainer& container);
}
}
#endif
#include "tsl/profiler/convert/trace_events_to_json.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "json/json.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
#include "tsl/profiler/utils/format_utils.h"
#include "tsl/profiler/utils/math_utils.h"
namespace tsl {
namespace profiler {
namespace {
inline std::string PicosToMicrosString(uint64 ps) {
return MaxPrecision(PicoToMicro(ps));
}
inline std::string JsonString(const std::string& s) {
return Json::valueToQuotedString(s.c_str());
}
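// Returns pointers to the map's entries sorted by key, so that output is
// deterministic even for unordered maps.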
template <typename Map>
std::vector<const typename Map::value_type*> SortByKey(const Map& m) {
std::vector<const typename Map::value_type*> pairs;
pairs.reserve(m.size());
for (const auto& pair : m) {
pairs.push_back(&pair);
}
absl::c_sort(pairs, [](const typename Map::value_type* a,
const typename Map::value_type* b) {
return a->first < b->first;
});
return pairs;
}
inline void AddDeviceMetadata(uint32 device_id, const Device& device,
std::string* json) {
if (!device.name().empty()) {
absl::StrAppend(json, R"({"ph":"M","pid":)", device_id,
R"(,"name":"process_name","args":{"name":)",
JsonString(device.name()), "}},");
}
absl::StrAppend(json, R"({"ph":"M","pid":)", device_id,
R"(,"name":"process_sort_index","args":{"sort_index":)",
device_id, "}},");
}
inline void AddResourceMetadata(uint32 device_id, uint32 resource_id,
const Resource& resource, std::string* json) {
if (!resource.name().empty()) {
absl::StrAppend(json, R"({"ph":"M","pid":)", device_id, R"(,"tid":)",
resource_id, R"(,"name":"thread_name","args":{"name":)",
JsonString(resource.name()), "}},");
}
uint32 sort_index =
resource.sort_index() ? resource.sort_index() : resource_id;
absl::StrAppend(json, R"({"ph":"M","pid":)", device_id, R"(,"tid":)",
resource_id, R"(,"name":"thread_sort_index")",
R"(,"args":{"sort_index":)", sort_index, "}},");
}
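// Emits one complete ("ph":"X") event; zero durations are clamped to 1 ps so
// the event stays visible, and args are emitted in key order.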
inline void AddTraceEvent(const TraceEvent& event, string* json) {
auto duration_ps = std::max(event.duration_ps(), protobuf_uint64{1});
absl::StrAppend(json, R"({"ph":"X","pid":)", event.device_id(), R"(,"tid":)",
event.resource_id(), R"(,"ts":)",
PicosToMicrosString(event.timestamp_ps()), R"(,"dur":)",
PicosToMicrosString(duration_ps), R"(,"name":)",
JsonString(event.name()));
if (!event.args().empty()) {
absl::StrAppend(json, R"(,"args":{)");
for (const auto* arg : SortByKey(event.args())) {
absl::StrAppend(json, JsonString(arg->first), ":",
JsonString(arg->second), ",");
}
json->back() = '}';
}
absl::StrAppend(json, "},");
}
}
std::string TraceContainerToJson(const TraceContainer& container) {
std::string json =
R"({"displayTimeUnit":"ns","metadata":{"highres-ticks":true},)"
R"("traceEvents":[)";
for (const auto* id_and_device : SortByKey(container.trace().devices())) {
uint32 device_id = id_and_device->first;
const Device& device = id_and_device->second;
AddDeviceMetadata(device_id, device, &json);
for (const auto* id_and_resource : SortByKey(device.resources())) {
uint32 resource_id = id_and_resource->first;
const Resource& resource = id_and_resource->second;
AddResourceMetadata(device_id, resource_id, resource, &json);
}
}
for (const TraceEvent* const event : container.UnsortedEvents()) {
AddTraceEvent(*event, &json);
}
absl::StrAppend(&json, "{}]}");
return json;
}
}
} | #include "tsl/profiler/convert/trace_events_to_json.h"
#include <string>
#include "json/json.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/convert/trace_container.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
namespace tsl {
namespace profiler {
namespace {
Json::Value ToJsonValue(const std::string& json_str) {
Json::Value json;
Json::Reader reader;
EXPECT_TRUE(reader.parse(json_str, json));
return json;
}
TEST(TraceEventsToJson, JsonConversion) {
const std::string metadata_string = R"pb(
devices {
key: 2
value {
name: 'D2'
device_id: 2
resources {
key: 2
value { resource_id: 2 name: 'R2.2' }
}
}
}
devices {
key: 1
value {
name: 'D1'
device_id: 1
resources {
key: 2
value { resource_id: 1 name: 'R1.2' }
}
}
}
)pb";
TraceContainer container;
EXPECT_TRUE(container.ParseMetadataFromString(metadata_string));
TraceEvent* event = container.CreateEvent();
event->set_device_id(1);
event->set_resource_id(2);
event->set_name("E1.2.1");
event->set_timestamp_ps(100000);
event->set_duration_ps(10000);
event->mutable_args()->insert({"long_name", "E1.2.1 long"});
event->mutable_args()->insert({"arg2", "arg2 val"});
event = container.CreateEvent();
event->set_device_id(2);
event->set_resource_id(2);
event->set_name("E2.2.1 # \"comment\"");
event->set_timestamp_ps(105000);
container.CapEvents(2);
Json::Value json = ToJsonValue(TraceContainerToJson(container));
Json::Value expected_json = ToJsonValue(R"(
{
"displayTimeUnit": "ns",
"metadata": { "highres-ticks": true },
"traceEvents": [
{"ph":"M", "pid":1, "name":"process_name", "args":{"name":"D1"}},
{"ph":"M", "pid":1, "name":"process_sort_index", "args":{"sort_index":1}},
{"ph":"M", "pid":1, "tid":2, "name":"thread_name",
"args":{"name":"R1.2"}},
{"ph":"M", "pid":1, "tid":2, "name":"thread_sort_index",
"args":{"sort_index":2}},
{"ph":"M", "pid":2, "name":"process_name", "args":{"name":"D2"}},
{"ph":"M", "pid":2, "name":"process_sort_index", "args":{"sort_index":2}},
{"ph":"M", "pid":2, "tid":2, "name":"thread_name",
"args":{"name":"R2.2"}},
{"ph":"M", "pid":2, "tid":2, "name":"thread_sort_index",
"args":{"sort_index":2}},
{
"ph" : "X",
"pid" : 1,
"tid" : 2,
"name" : "E1.2.1",
"ts" : 0.1,
"dur" : 0.01,
"args" : {"arg2": "arg2 val", "long_name": "E1.2.1 long"}
},
{
"ph" : "X",
"pid" : 2,
"tid" : 2,
"name" : "E2.2.1 # \"comment\"",
"ts" : 0.105,
"dur" : 1e-6
},
{}
]
})");
EXPECT_EQ(json, expected_json);
}
}
}
} |
1,478 | cpp | tensorflow/tensorflow | trace_viewer_visibility | tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility.cc | tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_TRACE_VIEWER_TRACE_VIEWER_VISIBILITY_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_TRACE_VIEWER_TRACE_VIEWER_VISIBILITY_H_
#include <cmath>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/types/optional.h"
#include "tensorflow/core/profiler/convert/trace_viewer/trace_events_filter_interface.h"
#include "tensorflow/core/profiler/protobuf/trace_events.pb.h"
#include "tsl/profiler/utils/timespan.h"
namespace tensorflow {
namespace profiler {
class TraceViewerVisibility {
public:
explicit TraceViewerVisibility(tsl::profiler::Timespan visible_span,
uint64_t resolution_ps = 0);
bool Visible(const TraceEvent& event);
bool VisibleAtResolution(const TraceEvent& event);
void SetVisibleAtResolution(const TraceEvent& event);
tsl::profiler::Timespan VisibleSpan() const { return visible_span_; }
uint64_t ResolutionPs() const { return resolution_ps_; }
private:
  using RowId = std::pair<uint32_t /*device_id*/, uint32_t /*resource_id*/>;
  using CounterRowId = std::pair<uint32_t /*device_id*/, std::string /*name*/>;
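  // Per-row bookkeeping: the end timestamp of the last visible event at each
  // nesting depth, and the timestamp of the last visible flow event.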
class RowVisibility {
public:
size_t Depth(uint64_t begin_timestamp_ps) const;
std::optional<uint64_t> LastEndTimestampPs(size_t depth) const {
std::optional<uint64_t> result;
if (depth < last_end_timestamp_ps_.size()) {
result = last_end_timestamp_ps_[depth];
}
return result;
}
std::optional<uint64_t> LastFlowTimestampPs() const {
return last_flow_timestamp_ps_;
}
void SetLastEndTimestampPs(size_t depth, uint64_t timestamp_ps) {
last_end_timestamp_ps_.resize(depth);
last_end_timestamp_ps_.push_back(timestamp_ps);
}
void SetLastFlowTimestampPs(uint64_t timestamp_ps) {
last_flow_timestamp_ps_ = timestamp_ps;
}
private:
std::vector<uint64_t> last_end_timestamp_ps_;
std::optional<uint64_t> last_flow_timestamp_ps_;
};
tsl::profiler::Timespan visible_span_;
uint64_t resolution_ps_;
absl::flat_hash_map<RowId, RowVisibility> rows_;
  absl::flat_hash_map<uint64_t /*flow_id*/, bool> flows_;
absl::flat_hash_map<CounterRowId, uint64_t> last_counter_timestamp_ps_;
};
class TraceVisibilityFilter : public TraceEventsFilterInterface {
public:
TraceVisibilityFilter(tsl::profiler::Timespan visible_span, double resolution)
: resolution_(resolution),
visibility_(visible_span, ResolutionPs(visible_span.duration_ps())) {}
tsl::profiler::Timespan VisibleSpan() const {
return visibility_.VisibleSpan();
}
uint64_t ResolutionPs() const { return visibility_.ResolutionPs(); }
void SetUp(const Trace& trace) override {
tsl::profiler::Timespan visible_span = VisibleSpan();
uint64_t start_time_ps = visible_span.begin_ps();
uint64_t end_time_ps = visible_span.end_ps();
if (end_time_ps == 0 && trace.has_max_timestamp_ps()) {
end_time_ps = trace.max_timestamp_ps();
}
if (start_time_ps == 0 && trace.has_min_timestamp_ps()) {
start_time_ps = trace.min_timestamp_ps();
}
visible_span =
tsl::profiler::Timespan::FromEndPoints(start_time_ps, end_time_ps);
visibility_ = TraceViewerVisibility(
visible_span, ResolutionPs(visible_span.duration_ps()));
}
void UpdateVisibility(double resolution) {
resolution_ = resolution;
visibility_ = TraceViewerVisibility(
visibility_.VisibleSpan(),
ResolutionPs(visibility_.VisibleSpan().duration_ps()));
}
bool Filter(const TraceEvent& event) override {
return !visibility_.Visible(event);
}
private:
uint64_t ResolutionPs(uint64_t duration_ps) {
return (resolution_ == 0.0) ? 0 : std::llround(duration_ps / resolution_);
}
double resolution_;
TraceViewerVisibility visibility_;
};
}
}
#endif
#include "tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility.h"
#include <cstdint>
#include "tsl/profiler/utils/timespan.h"
namespace tensorflow {
namespace profiler {
TraceViewerVisibility::TraceViewerVisibility(
tsl::profiler::Timespan visible_span, uint64_t resolution_ps)
: visible_span_(visible_span), resolution_ps_(resolution_ps) {}
bool TraceViewerVisibility::Visible(const TraceEvent& event) {
if (visible_span_.Instant()) return true;
tsl::profiler::Timespan span(event.timestamp_ps(), event.duration_ps());
if (!visible_span_.Overlaps(span)) return false;
if (resolution_ps_ == 0) return true;
return VisibleAtResolution(event);
}
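// Decides whether an event that overlaps the visible span survives
// downsampling. Events lasting at least resolution_ps_ are kept; shorter ones
// are kept only if they start at least resolution_ps_ after the last kept
// event at the same row and nesting depth. Flow arcs share the decision made
// for their flow so a kept flow is not broken up; counters are always kept.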
bool TraceViewerVisibility::VisibleAtResolution(const TraceEvent& event) {
DCHECK_NE(resolution_ps_, 0);
if (!event.has_resource_id()) {
#if 1
return true;
#else
CounterRowId counter_row_id(event.device_id(), event.name());
auto iter = last_counter_timestamp_ps_.find(counter_row_id);
bool found = (iter != last_counter_timestamp_ps_.end());
bool visible =
!found || ((event.timestamp_ps() - iter->second) >= resolution_ps_);
if (visible) {
if (found) {
iter->second = event.timestamp_ps();
} else {
last_counter_timestamp_ps_.emplace(counter_row_id,
event.timestamp_ps());
}
}
return visible;
#endif
}
tsl::profiler::Timespan span(event.timestamp_ps(), event.duration_ps());
bool visible = (span.duration_ps() >= resolution_ps_);
auto& row = rows_[RowId(event.device_id(), event.resource_id())];
size_t depth = row.Depth(span.begin_ps());
if (!visible) {
auto last_end_timestamp_ps = row.LastEndTimestampPs(depth);
visible = !last_end_timestamp_ps ||
(span.begin_ps() - *last_end_timestamp_ps >= resolution_ps_);
}
if (event.has_flow_id()) {
auto result = flows_.try_emplace(event.flow_id(), visible);
if (!visible) {
if (result.second) {
auto last_flow_timestamp_ps = row.LastFlowTimestampPs();
result.first->second =
!last_flow_timestamp_ps ||
(span.end_ps() - *last_flow_timestamp_ps >= resolution_ps_);
}
visible = result.first->second;
}
if (event.flow_entry_type() == TraceEvent::FLOW_END) {
flows_.erase(result.first);
}
if (visible) {
row.SetLastFlowTimestampPs(span.end_ps());
}
}
if (visible) {
row.SetLastEndTimestampPs(depth, span.end_ps());
}
return visible;
}
void TraceViewerVisibility::SetVisibleAtResolution(const TraceEvent& event) {
DCHECK_NE(resolution_ps_, 0);
if (!event.has_resource_id()) {
CounterRowId counter_row_id(event.device_id(), event.name());
last_counter_timestamp_ps_.insert_or_assign(counter_row_id,
event.timestamp_ps());
} else {
tsl::profiler::Timespan span(event.timestamp_ps(), event.duration_ps());
auto& row = rows_[RowId(event.device_id(), event.resource_id())];
if (event.has_flow_id()) {
if (event.flow_entry_type() == TraceEvent::FLOW_END) {
flows_.erase(event.flow_id());
} else {
flows_.try_emplace(event.flow_id(), true);
}
row.SetLastFlowTimestampPs(span.end_ps());
}
size_t depth = row.Depth(span.begin_ps());
row.SetLastEndTimestampPs(depth, span.end_ps());
}
}
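// Returns the nesting depth for an event starting at begin_timestamp_ps: the
// shallowest depth whose last event has already ended.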
size_t TraceViewerVisibility::RowVisibility::Depth(
uint64_t begin_timestamp_ps) const {
size_t depth = 0;
for (; depth < last_end_timestamp_ps_.size(); ++depth) {
if (last_end_timestamp_ps_[depth] <= begin_timestamp_ps) break;
}
return depth;
}
}
} | #include "tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility.h"
#include <cstdint>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/trace_events.pb.h"
#include "tsl/profiler/utils/timespan.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::Timespan;
constexpr uint32_t kDeviceId = 10;
constexpr uint32_t kResourceId = 1;
constexpr uint32_t kSrcResourceId = 2;
constexpr uint32_t kDstResourceId = 4;
TraceEvent Complete(Timespan span, uint32_t resource_id = kResourceId) {
TraceEvent event;
event.set_device_id(kDeviceId);
event.set_resource_id(resource_id);
event.set_timestamp_ps(span.begin_ps());
event.set_duration_ps(span.duration_ps());
return event;
}
TraceEvent Counter(uint64_t time_ps) {
TraceEvent event;
event.set_device_id(kDeviceId);
event.set_timestamp_ps(time_ps);
return event;
}
TraceEvent Flow(Timespan span, uint64_t flow_id, uint32_t resource_id) {
TraceEvent event;
event.set_flow_id(flow_id);
event.set_device_id(kDeviceId);
event.set_resource_id(resource_id);
event.set_timestamp_ps(span.begin_ps());
event.set_duration_ps(span.duration_ps());
return event;
}
TEST(TraceViewerVisibilityTest, VisibilityNoDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000));
EXPECT_FALSE(v.Visible(Complete(Timespan(999))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1000))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1500))));
EXPECT_TRUE(v.Visible(Complete(Timespan(2000))));
EXPECT_FALSE(v.Visible(Complete(Timespan(2001))));
EXPECT_FALSE(v.Visible(Complete(Timespan(900, 99))));
EXPECT_TRUE(v.Visible(Complete(Timespan(900, 100))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1450, 100))));
EXPECT_TRUE(v.Visible(Complete(Timespan(2000, 50))));
EXPECT_FALSE(v.Visible(Complete(Timespan(2001, 50))));
}
TEST(TraceViewerVisibilityTest, DISABLED_CounterEventsDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000), 100);
EXPECT_FALSE(v.Visible(Counter(999)));
EXPECT_TRUE(v.Visible(Counter(1000)));
EXPECT_FALSE(v.Visible(Counter(1099)));
EXPECT_TRUE(v.Visible(Counter(1100)));
EXPECT_TRUE(v.Visible(Counter(2000)));
EXPECT_FALSE(v.Visible(Counter(2001)));
}
TEST(TraceViewerVisibilityTest, CompleteEventsDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000), 100);
EXPECT_TRUE(v.Visible(Complete(Timespan(950, 50))));
EXPECT_FALSE(v.Visible(Complete(Timespan(1050, 50))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1055, 200))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1355, 50))));
}
TEST(TraceViewerVisibilityTest, CompleteNestedEventsDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000), 100);
EXPECT_TRUE(v.Visible(Complete(Timespan(1000, 200))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1200, 190))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1250, 20))));
EXPECT_FALSE(v.Visible(Complete(Timespan(1270, 20))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1290, 100))));
}
TEST(TraceViewerVisibilityTest, FlowEventsDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000), 100);
EXPECT_TRUE(v.Visible(Flow(Timespan(1000, 50), 1, kSrcResourceId)));
EXPECT_FALSE(v.Visible(Flow(Timespan(1050, 50), 2, kSrcResourceId)));
EXPECT_TRUE(v.Visible(Flow(Timespan(1100, 50), 3, kSrcResourceId)));
EXPECT_TRUE(v.Visible(Flow(Timespan(1100, 50), 1, kDstResourceId)));
EXPECT_FALSE(v.Visible(Flow(Timespan(1200, 52), 2, kDstResourceId)));
EXPECT_TRUE(v.Visible(Flow(Timespan(1252, 10), 3, kDstResourceId)));
EXPECT_TRUE(v.Visible(Complete(Timespan(1300, 50))));
EXPECT_FALSE(v.Visible(Complete(Timespan(1350, 50))));
EXPECT_FALSE(v.Visible(Complete(Timespan(1400, 50))));
EXPECT_TRUE(v.Visible(Flow(Timespan(1600, 50), 4, kResourceId)));
EXPECT_TRUE(v.Visible(Flow(Timespan(1700, 52), 5, kResourceId)));
EXPECT_FALSE(v.Visible(Flow(Timespan(1752, 10), 6, kResourceId)));
}
}
}
} |
1,479 | cpp | tensorflow/tensorflow | update_api_def | tensorflow/core/api_def/update_api_def.cc | tensorflow/core/api_def/update_api_def_test.cc | #ifndef TENSORFLOW_CORE_API_DEF_UPDATE_API_DEF_H_
#define TENSORFLOW_CORE_API_DEF_UPDATE_API_DEF_H_
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
string CreateApiDef(const OpDef& op);
string RemoveDoc(const OpDef& op, const string& file_contents,
size_t start_location);
void CreateApiDefs(const OpList& ops, const string& api_def_dir,
const string& op_file_pattern);
}
#endif
#include "tensorflow/core/api_def/update_api_def.h"
#include <ctype.h>
#include <algorithm>
#include <string>
#include <vector>
#include "tensorflow/core/api_def/excluded_ops.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace {
constexpr char kApiDefFileFormat[] = "api_def_%s.pbtxt";
constexpr char kDocStart[] = ".Doc(R\"doc(";
constexpr char kDocEnd[] = ")doc\")";
void FillBaseApiDef(ApiDef* api_def, const OpDef& op) {
api_def->set_graph_op_name(op.name());
for (auto& input_arg : op.input_arg()) {
if (!input_arg.description().empty()) {
auto* api_def_in_arg = api_def->add_in_arg();
api_def_in_arg->set_name(input_arg.name());
api_def_in_arg->set_description(input_arg.description());
}
}
for (auto& output_arg : op.output_arg()) {
if (!output_arg.description().empty()) {
auto* api_def_out_arg = api_def->add_out_arg();
api_def_out_arg->set_name(output_arg.name());
api_def_out_arg->set_description(output_arg.description());
}
}
for (auto& attr : op.attr()) {
if (!attr.description().empty()) {
auto* api_def_attr = api_def->add_attr();
api_def_attr->set_name(attr.name());
api_def_attr->set_description(attr.description());
}
}
api_def->set_summary(op.summary());
api_def->set_description(op.description());
}
bool OpHasDocs(const OpDef& op) {
if (!op.summary().empty() || !op.description().empty()) {
return true;
}
for (const auto& arg : op.input_arg()) {
if (!arg.description().empty()) {
return true;
}
}
for (const auto& arg : op.output_arg()) {
if (!arg.description().empty()) {
return true;
}
}
for (const auto& attr : op.attr()) {
if (!attr.description().empty()) {
return true;
}
}
return false;
}
bool CheckDocsMatch(const OpDef& op1, const OpDef& op2) {
if (op1.summary() != op2.summary() ||
op1.description() != op2.description() ||
op1.input_arg_size() != op2.input_arg_size() ||
op1.output_arg_size() != op2.output_arg_size() ||
op1.attr_size() != op2.attr_size()) {
return false;
}
for (int i = 0; i < op1.input_arg_size(); ++i) {
if (op1.input_arg(i).description() != op2.input_arg(i).description()) {
return false;
}
}
for (int i = 0; i < op1.output_arg_size(); ++i) {
if (op1.output_arg(i).description() != op2.output_arg(i).description()) {
return false;
}
}
for (int i = 0; i < op1.attr_size(); ++i) {
if (op1.attr(i).description() != op2.attr(i).description()) {
return false;
}
}
return true;
}
bool ValidateOpDocs(const OpDef& op, const string& doc) {
OpDefBuilder b(op.name());
for (const auto& arg : op.input_arg()) {
b.Input(arg.name() + ":string");
}
for (const auto& arg : op.output_arg()) {
b.Output(arg.name() + ":string");
}
for (const auto& attr : op.attr()) {
b.Attr(attr.name() + ":string");
}
b.Doc(doc);
OpRegistrationData op_reg_data;
TF_CHECK_OK(b.Finalize(&op_reg_data));
return CheckDocsMatch(op, op_reg_data.op_def);
}
}
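// Removes the .Doc(R"doc(...)doc") call found at or after start_location, but
// only if its text matches the documentation already in `op`; otherwise the
// contents are returned unchanged and an error is reported.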
string RemoveDoc(const OpDef& op, const string& file_contents,
size_t start_location) {
const auto doc_start_location = file_contents.find(kDocStart, start_location);
const string format_error = strings::Printf(
"Could not find %s doc for removal. Make sure the doc is defined with "
"'%s' prefix and '%s' suffix or remove the doc manually.",
op.name().c_str(), kDocStart, kDocEnd);
if (doc_start_location == string::npos) {
std::cerr << format_error << std::endl;
LOG(ERROR) << "Didn't find doc start";
return file_contents;
}
const auto doc_end_location = file_contents.find(kDocEnd, doc_start_location);
if (doc_end_location == string::npos) {
LOG(ERROR) << "Didn't find doc start";
std::cerr << format_error << std::endl;
return file_contents;
}
const auto doc_start_size = sizeof(kDocStart) - 1;
string doc_text = file_contents.substr(
doc_start_location + doc_start_size,
doc_end_location - doc_start_location - doc_start_size);
if (!ValidateOpDocs(op, doc_text)) {
LOG(ERROR) << "Invalid doc: " << doc_text;
std::cerr << format_error << std::endl;
return file_contents;
}
auto before_doc = file_contents.substr(0, doc_start_location);
absl::StripTrailingAsciiWhitespace(&before_doc);
return before_doc +
file_contents.substr(doc_end_location + sizeof(kDocEnd) - 1);
}
namespace {
void RemoveDocs(const std::vector<const OpDef*>& ops,
const std::vector<string>& op_files) {
std::set<string> processed_ops;
for (const auto& file : op_files) {
string file_contents;
bool file_contents_updated = false;
TF_CHECK_OK(ReadFileToString(Env::Default(), file, &file_contents));
for (auto op : ops) {
if (processed_ops.find(op->name()) != processed_ops.end()) {
continue;
}
string register_call =
strings::Printf("REGISTER_OP(\"%s\")", op->name().c_str());
const auto register_call_location = file_contents.find(register_call);
if (register_call_location == string::npos) {
continue;
}
std::cout << "Removing .Doc call for " << op->name() << " from " << file
<< "." << std::endl;
file_contents = RemoveDoc(*op, file_contents, register_call_location);
file_contents_updated = true;
processed_ops.insert(op->name());
}
if (file_contents_updated) {
TF_CHECK_OK(WriteStringToFile(Env::Default(), file, file_contents))
<< "Could not remove .Doc calls in " << file
<< ". Make sure the file is writable.";
}
}
}
}
string CreateApiDef(const OpDef& op) {
ApiDefs api_defs;
FillBaseApiDef(api_defs.add_op(), op);
const std::vector<string> multi_line_fields = {"description"};
std::string new_api_defs_str;
::tensorflow::protobuf::TextFormat::PrintToString(api_defs,
&new_api_defs_str);
return PBTxtToMultiline(new_api_defs_str, multi_line_fields);
}
void CreateApiDefs(const OpList& ops, const string& api_def_dir,
const string& op_file_pattern) {
auto* excluded_ops = GetExcludedOps();
std::vector<const OpDef*> new_ops_with_docs;
for (const auto& op : ops.op()) {
if (excluded_ops->find(op.name()) != excluded_ops->end()) {
continue;
}
string file_path =
io::JoinPath(tensorflow::string(api_def_dir), kApiDefFileFormat);
file_path = strings::Printf(file_path.c_str(), op.name().c_str());
if (!Env::Default()->FileExists(file_path).ok()) {
std::cout << "Creating ApiDef file " << file_path << std::endl;
const auto& api_def_text = CreateApiDef(op);
TF_CHECK_OK(WriteStringToFile(Env::Default(), file_path, api_def_text));
if (OpHasDocs(op)) {
new_ops_with_docs.push_back(&op);
}
}
}
if (!op_file_pattern.empty()) {
std::vector<string> op_files;
TF_CHECK_OK(Env::Default()->GetMatchingPaths(op_file_pattern, &op_files));
RemoveDocs(new_ops_with_docs, op_files);
}
}
} | #include "tensorflow/core/api_def/update_api_def.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(UpdateApiDefTest, TestRemoveDocSingleOp) {
const string op_def_text = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.Output("output: T")
.Attr("b: type")
.SetShapeFn(shape_inference::UnchangedShape);
)opdef";
const string op_def_text_with_doc = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.Output("output: T")
.Attr("b: type")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Summary for Op1.
Description
for Op1.
b : Description for b.
a: Description for a.
output: Description for output.
)doc");
)opdef";
const string op_text = R"(
name: "Op1"
input_arg {
name: "a"
description: "Description for a."
}
output_arg {
name: "output"
description: "Description for output."
}
attr {
name: "b"
description: "Description for b."
}
summary: "Summary for Op1."
description: "Description\nfor Op1."
)";
OpDef op;
protobuf::TextFormat::ParseFromString(op_text, &op);
EXPECT_EQ(op_def_text,
            RemoveDoc(op, op_def_text_with_doc, 0 /* start_location */));
}
TEST(UpdateApiDefTest, TestRemoveDocMultipleOps) {
const string op_def_text = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Op2")
.Input("a: T")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Op3")
.Input("c: T")
.SetShapeFn(shape_inference::UnchangedShape);
)opdef";
const string op_def_text_with_doc = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.Doc(R"doc(
Summary for Op1.
)doc")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Op2")
.Input("a: T")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Summary for Op2.
)doc");
REGISTER_OP("Op3")
.Input("c: T")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Summary for Op3.
)doc");
)opdef";
const string op1_text = R"(
name: "Op1"
input_arg {
name: "a"
}
summary: "Summary for Op1."
)";
const string op2_text = R"(
name: "Op2"
input_arg {
name: "a"
}
summary: "Summary for Op2."
)";
const string op3_text = R"(
name: "Op3"
input_arg {
name: "c"
}
summary: "Summary for Op3."
)";
OpDef op1, op2, op3;
protobuf::TextFormat::ParseFromString(op1_text, &op1);
protobuf::TextFormat::ParseFromString(op2_text, &op2);
protobuf::TextFormat::ParseFromString(op3_text, &op3);
string updated_text =
RemoveDoc(op2, op_def_text_with_doc,
op_def_text_with_doc.find("Op2") );
EXPECT_EQ(string::npos, updated_text.find("Summary for Op2"));
EXPECT_NE(string::npos, updated_text.find("Summary for Op1"));
EXPECT_NE(string::npos, updated_text.find("Summary for Op3"));
updated_text = RemoveDoc(op3, updated_text,
updated_text.find("Op3") );
updated_text = RemoveDoc(op1, updated_text,
updated_text.find("Op1") );
EXPECT_EQ(op_def_text, updated_text);
}
TEST(UpdateApiDefTest, TestCreateApiDef) {
const string op_text = R"(
name: "Op1"
input_arg {
name: "a"
description: "Description for a."
}
output_arg {
name: "output"
description: "Description for output."
}
attr {
name: "b"
description: "Description for b."
}
summary: "Summary for Op1."
description: "Description\nfor Op1."
)";
OpDef op;
protobuf::TextFormat::ParseFromString(op_text, &op);
const string expected_api_def = R"(op {
graph_op_name: "Op1"
in_arg {
name: "a"
description: <<END
Description for a.
END
}
out_arg {
name: "output"
description: <<END
Description for output.
END
}
attr {
name: "b"
description: <<END
Description for b.
END
}
summary: "Summary for Op1."
description: <<END
Description
for Op1.
END
}
)";
EXPECT_EQ(expected_api_def, CreateApiDef(op));
}
}
} |
1,480 | cpp | tensorflow/tensorflow | tpu_embedding_errors | tensorflow/core/tpu/tpu_embedding_errors.cc | tensorflow/core/tpu/tpu_embedding_errors_test.cc | #ifndef TENSORFLOW_CORE_TPU_TPU_EMBEDDING_ERRORS_H_
#define TENSORFLOW_CORE_TPU_TPU_EMBEDDING_ERRORS_H_
#include <string>
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/tpu/tpu_embedding_configuration.pb.h"
namespace tensorflow::tpu {
constexpr absl::string_view kTpuEmbeddingErrorUrl =
"type.googleapis.com/tensorflow.tpu.TPUEmbeddingError";
constexpr absl::string_view kTpuEmbeddingErrorMessage =
"TPUEmbedding permanent error";
Status AppendTpuEmbeddingErrorPayload(Status obj);
template <typename T>
StatusOr<T> AppendTpuEmbeddingErrorPayload(StatusOr<T> obj) {
if (obj.ok()) {
return std::move(obj.value());
} else {
const std::string error_message =
absl::StrCat(kTpuEmbeddingErrorMessage, ". ", obj.status().message());
Status status(obj.status().code(), error_message);
TPUEmbeddingError error_payload;
status.SetPayload(kTpuEmbeddingErrorUrl,
absl::Cord(error_payload.SerializeAsString()));
return status;
}
}
bool HasTpuEmbeddingErrorPayload(const Status& status);
bool HasTpuEmbeddingErrorMessage(const Status& status);
}
#endif
#include "tensorflow/core/tpu/tpu_embedding_errors.h"
#include <string>
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/tpu/tpu_embedding_configuration.pb.h"
namespace tensorflow::tpu {
Status AppendTpuEmbeddingErrorPayload(Status obj) {
if (obj.ok()) {
return absl::OkStatus();
} else {
const std::string error_message =
absl::StrCat(kTpuEmbeddingErrorMessage, ". ", obj.message());
Status status(obj.code(), error_message);
TPUEmbeddingError error_payload;
status.SetPayload(kTpuEmbeddingErrorUrl,
absl::Cord(error_payload.SerializeAsString()));
return status;
}
}
bool HasTpuEmbeddingErrorPayload(const Status& status) {
return status.GetPayload(kTpuEmbeddingErrorUrl).has_value();
}
bool HasTpuEmbeddingErrorMessage(const Status& status) {
return absl::StrContains(status.message(), kTpuEmbeddingErrorMessage);
}
} | #include "tensorflow/core/tpu/tpu_embedding_errors.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow::tpu {
namespace {
using absl::Status;
using absl::StatusOr;
StatusOr<std::string> GenerateTFStatusOr(absl::StatusCode code,
absl::string_view value = "") {
if (code == absl::StatusCode::kOk) {
return std::string(value);
} else {
return absl::Status(code, value);
}
}
TEST(TpuEmbeddingErrors, StatusOk) {
constexpr absl::string_view kValue = "success";
{
const Status status = AppendTpuEmbeddingErrorPayload(absl::OkStatus());
TF_EXPECT_OK(status);
EXPECT_FALSE(HasTpuEmbeddingErrorPayload(status));
EXPECT_FALSE(HasTpuEmbeddingErrorMessage(status));
}
{
TF_ASSERT_OK_AND_ASSIGN(const std::string value,
AppendTpuEmbeddingErrorPayload(GenerateTFStatusOr(
absl::StatusCode::kOk, kValue)));
EXPECT_EQ(value, kValue);
}
}
TEST(TpuEmbeddingErrors, StatusFailed) {
{
const Status status =
AppendTpuEmbeddingErrorPayload(errors::InvalidArgument(""));
EXPECT_EQ(status.code(), error::Code::INVALID_ARGUMENT);
EXPECT_TRUE(HasTpuEmbeddingErrorPayload(status));
EXPECT_TRUE(HasTpuEmbeddingErrorMessage(status));
}
{
StatusOr<std::string> status_or = AppendTpuEmbeddingErrorPayload(
GenerateTFStatusOr(absl::StatusCode::kResourceExhausted));
EXPECT_FALSE(status_or.ok());
const Status& status = status_or.status();
EXPECT_EQ(status.code(), error::Code::RESOURCE_EXHAUSTED);
EXPECT_TRUE(HasTpuEmbeddingErrorPayload(status));
EXPECT_TRUE(HasTpuEmbeddingErrorMessage(status));
}
}
}
} |
1,481 | cpp | tensorflow/tensorflow | tpu_embedding_configuration_proto_rewrite | tensorflow/core/tpu/tpu_embedding_configuration_proto_rewrite.cc | tensorflow/core/tpu/tpu_embedding_configuration_proto_rewrite_test.cc | #ifndef TENSORFLOW_CORE_TPU_TPU_EMBEDDING_CONFIGURATION_PROTO_REWRITE_H_
#define TENSORFLOW_CORE_TPU_TPU_EMBEDDING_CONFIGURATION_PROTO_REWRITE_H_
#include "absl/status/status.h"
#include "tensorflow/core/protobuf/tpu/tpu_embedding_configuration.pb.h"
namespace tensorflow {
absl::Status PopulateMissingFieldsInTPUEmbeddingConfig(
tpu::TPUEmbeddingConfiguration* config);
}
#endif
#include "tensorflow/core/tpu/tpu_embedding_configuration_proto_rewrite.h"
#include <cstdint>
#include <functional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "tensorflow/core/lib/math/math_util.h"
#include "tensorflow/core/protobuf/tpu/tpu_embedding_configuration.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace tensorflow {
namespace {
absl::Status ValidateBatchSizeAndFeatureCounts(
const tpu::TPUEmbeddingConfiguration& config) {
if (config.batch_size_per_tensor_core() <= 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"Invalid batch_size_per_tensor_core: %d found in the TPU embedding "
"configuration. Valid values are >0.",
config.batch_size_per_tensor_core()));
}
for (const auto& table_config : config.table_descriptor()) {
if (table_config.num_features() <= 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"Invalid num_features: %d found for table: %s in the TPU embedding "
"configuration. Valid values are >0.",
table_config.num_features(), table_config.name()));
}
}
return absl::OkStatus();
}
absl::Status ValidateBatchSizeAndFeatureCountsAreEmpty(
const tpu::TPUEmbeddingConfiguration& config) {
if (config.batch_size_per_tensor_core() != 0) {
return absl::InvalidArgumentError(
"Invalid TPU embedding configuration. The batch_size_per_tensor_core "
"field must NOT be populated when the feature_descriptor fields are "
"filled in.");
}
for (const auto& table_config : config.table_descriptor()) {
if (table_config.num_features() != 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"Invalid TPU embedding configuration. The "
"TableDescriptor.num_features field must NOT be populated when the "
"feature_descriptor fields are filled in, num_features is set to %d "
"for table %s.",
table_config.num_features(), table_config.name()));
}
}
return absl::OkStatus();
}
absl::Status ValidateFeatureDescriptors(
const tpu::TPUEmbeddingConfiguration& config) {
const int table_count = config.table_descriptor_size();
std::vector<bool> tables_present(table_count, false);
for (const auto& feature_config : config.feature_descriptor()) {
const int table_id = feature_config.table_id();
const auto& input_shape = feature_config.input_shape();
if (table_id < 0 || table_id >= table_count) {
return absl::InvalidArgumentError(absl::StrFormat(
"Invalid table_id: %d found in feature_descriptor: %s, all table_ids "
"must be in the range[0, %d)",
table_id, feature_config.ShortDebugString(), table_count));
}
if (input_shape.empty()) {
return absl::InvalidArgumentError(absl::StrFormat(
"The input_shape field cannot be empty in feature_descriptor: %s",
feature_config.ShortDebugString()));
}
for (const int dim_size : input_shape) {
if (dim_size <= 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"The input_shape dimension sizes must all be >0 in "
"feature_descriptor: %s, found dimension size set to %d",
feature_config.ShortDebugString(), dim_size));
}
}
tables_present[table_id] = true;
}
for (int table_id = 0; table_id < table_count; ++table_id) {
if (!tables_present[table_id]) {
return absl::InvalidArgumentError(absl::StrFormat(
"No feature_descriptor fields found for table: %s (ID: %d) in "
"the TPU embedding configuration.",
config.table_descriptor(table_id).name(), table_id));
}
}
return absl::OkStatus();
}
void PopulateFeatureDescriptors(tpu::TPUEmbeddingConfiguration* config) {
for (int table_id = 0; table_id < config->table_descriptor_size();
++table_id) {
tpu::TPUEmbeddingConfiguration::FeatureDescriptor* feature_descriptor =
config->add_feature_descriptor();
feature_descriptor->set_table_id(table_id);
feature_descriptor->add_input_shape(
config->batch_size_per_tensor_core() *
config->table_descriptor(table_id).num_features());
}
}
std::vector<int> ComputeInputFeatureBatchSizes(
const tpu::TPUEmbeddingConfiguration& config) {
std::vector<int32_t> input_feature_batch_sizes;
for (int i = 0; i < config.feature_descriptor_size(); ++i) {
const int32_t batch_size =
absl::c_accumulate(config.feature_descriptor(i).input_shape(),
1, std::multiplies<>());
input_feature_batch_sizes.push_back(batch_size);
}
return input_feature_batch_sizes;
}
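// The per-TensorCore batch size is the GCD of all per-feature input batch
// sizes, so that each feature's batch size is an integer multiple of it.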
int ComputeBatchSizePerTensorCore(
absl::Span<const int> input_feature_batch_sizes) {
uint32_t batch_size = input_feature_batch_sizes[0];
for (const uint32_t input_feature_batch_size : input_feature_batch_sizes) {
batch_size =
tensorflow::MathUtil::GCD(batch_size, input_feature_batch_size);
}
return batch_size;
}
std::vector<int> ComputeTpuFeatureCounts(
const tpu::TPUEmbeddingConfiguration& config,
absl::Span<const int> input_feature_batch_sizes,
int batch_size_per_tensor_core) {
DCHECK_EQ(input_feature_batch_sizes.size(), config.feature_descriptor_size());
std::vector<int> tpu_feature_counts(config.table_descriptor_size(), 0);
for (int i = 0; i < config.feature_descriptor_size(); ++i) {
DCHECK_EQ(input_feature_batch_sizes[i] % batch_size_per_tensor_core, 0);
tpu_feature_counts[config.feature_descriptor(i).table_id()] +=
(input_feature_batch_sizes[i] / batch_size_per_tensor_core);
}
return tpu_feature_counts;
}
void PopulateBatchSizeAndFeatureCounts(tpu::TPUEmbeddingConfiguration* config) {
const std::vector<int> input_feature_batch_sizes =
ComputeInputFeatureBatchSizes(*config);
const int batch_size_per_tensor_core =
ComputeBatchSizePerTensorCore(input_feature_batch_sizes);
const std::vector<int> tpu_feature_counts = ComputeTpuFeatureCounts(
*config, input_feature_batch_sizes, batch_size_per_tensor_core);
config->set_batch_size_per_tensor_core(batch_size_per_tensor_core);
for (int table_id = 0; table_id < config->table_descriptor_size();
++table_id) {
auto* table_config = config->mutable_table_descriptor(table_id);
table_config->set_num_features(tpu_feature_counts[table_id]);
}
}
}
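// Fills in whichever half of the configuration is missing: when no
// feature_descriptor is present, one descriptor per table is synthesized from
// batch_size_per_tensor_core and num_features; otherwise those two fields are
// derived from the feature descriptors (batch size = GCD of the per-feature
// batch sizes) and must not have been set by the caller.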
absl::Status PopulateMissingFieldsInTPUEmbeddingConfig(
tpu::TPUEmbeddingConfiguration* config) {
if (config->feature_descriptor_size() == 0) {
TF_RETURN_IF_ERROR(ValidateBatchSizeAndFeatureCounts(*config));
PopulateFeatureDescriptors(config);
} else {
TF_RETURN_IF_ERROR(ValidateBatchSizeAndFeatureCountsAreEmpty(*config));
TF_RETURN_IF_ERROR(ValidateFeatureDescriptors(*config));
PopulateBatchSizeAndFeatureCounts(config);
}
return absl::OkStatus();
}
} | #include "tensorflow/core/tpu/tpu_embedding_configuration_proto_rewrite.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/protobuf/tpu/tpu_embedding_configuration.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace {
Status ParseTextProto(absl::string_view text_proto,
tpu::TPUEmbeddingConfiguration* parsed_proto) {
tsl::protobuf::TextFormat::Parser parser;
tsl::protobuf::io::ArrayInputStream input_stream(text_proto.data(),
text_proto.size());
if (parser.Parse(&input_stream, parsed_proto)) {
return absl::OkStatus();
}
parsed_proto->Clear();
return errors::InvalidArgument("Could not parse text proto: ", text_proto);
}
TEST(TPUEmbeddingConfigurationProtoRewriteTest, FillFeatureDescriptor) {
const std::string config_str = R"pb(
table_descriptor {
name: "T0"
vocabulary_size: 35324928
dimension: 128
num_features: 3
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
table_descriptor {
name: "T1"
vocabulary_size: 3122176
dimension: 128
num_features: 2
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
mode: TRAINING
batch_size_per_tensor_core: 256
num_hosts: 16
num_tensor_cores: 128
pipeline_execution_with_tensor_core: true
)pb";
tpu::TPUEmbeddingConfiguration tpu_embedding_config;
TF_ASSERT_OK(ParseTextProto(config_str, &tpu_embedding_config));
TF_ASSERT_OK(
PopulateMissingFieldsInTPUEmbeddingConfig(&tpu_embedding_config));
EXPECT_EQ(tpu_embedding_config.feature_descriptor_size(), 2);
const auto& feature_0 = tpu_embedding_config.feature_descriptor(0);
EXPECT_EQ(feature_0.table_id(), 0);
EXPECT_THAT(feature_0.input_shape(), ::testing::ElementsAre(256 * 3));
const auto& feature_1 = tpu_embedding_config.feature_descriptor(1);
EXPECT_EQ(feature_1.table_id(), 1);
EXPECT_THAT(feature_1.input_shape(), ::testing::ElementsAre(256 * 2));
}
TEST(TPUEmbeddingConfigurationProtoRewriteTest, FillBatchSizeAndNumFeatures) {
const std::string config_str = R"pb(
table_descriptor {
name: "T0"
vocabulary_size: 35324928
dimension: 128
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
table_descriptor {
name: "T1"
vocabulary_size: 3122176
dimension: 128
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
feature_descriptor {
name: "F0"
table_id: 0
input_shape: [ 100, 5 ]
}
feature_descriptor {
name: "F1"
table_id: 1
input_shape: [ 200, 5, 20 ]
}
feature_descriptor {
name: "F2"
table_id: 0
input_shape: [ 50 ]
}
feature_descriptor {
name: "F3"
table_id: 0
input_shape: [ 100, 2, 3 ]
}
mode: TRAINING
num_hosts: 16
num_tensor_cores: 128
pipeline_execution_with_tensor_core: true
)pb";
tpu::TPUEmbeddingConfiguration tpu_embedding_config;
TF_ASSERT_OK(ParseTextProto(config_str, &tpu_embedding_config));
TF_ASSERT_OK(
PopulateMissingFieldsInTPUEmbeddingConfig(&tpu_embedding_config));
EXPECT_EQ(tpu_embedding_config.batch_size_per_tensor_core(), 50);
const auto& table_0 = tpu_embedding_config.table_descriptor(0);
EXPECT_EQ(table_0.num_features(), 23);
const auto& table_1 = tpu_embedding_config.table_descriptor(1);
EXPECT_EQ(table_1.num_features(), 400);
}
TEST(TPUEmbeddingConfigurationProtoRewriteTest, InvalidBatchSizeOrNumFeatures) {
const std::string config_str = R"pb(
table_descriptor {
name: "T0"
vocabulary_size: 35324928
dimension: 128
num_features: 3
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
feature_descriptor {
table_id: 0
input_shape: [ 768 ]
}
mode: TRAINING
batch_size_per_tensor_core: 256
num_hosts: 16
num_tensor_cores: 128
pipeline_execution_with_tensor_core: true
)pb";
tpu::TPUEmbeddingConfiguration tpu_embedding_config;
TF_ASSERT_OK(ParseTextProto(config_str, &tpu_embedding_config));
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.clear_feature_descriptor();
invalid_config.clear_batch_size_per_tensor_core();
EXPECT_THAT(
PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("Invalid batch_size_per_tensor_core")));
}
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.clear_feature_descriptor();
invalid_config.mutable_table_descriptor(0)->clear_num_features();
EXPECT_THAT(PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("Invalid num_features")));
}
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
EXPECT_THAT(
PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr(
"The batch_size_per_tensor_core field must NOT be populated")));
}
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.clear_batch_size_per_tensor_core();
EXPECT_THAT(PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("The TableDescriptor.num_features "
"field must NOT be populated")));
}
}
TEST(TPUEmbeddingConfigurationProtoRewriteTest, InvalidFeatureDescriptor) {
const std::string config_str = R"pb(
table_descriptor {
name: "T0"
vocabulary_size: 35324928
dimension: 128
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
table_descriptor {
name: "T1"
vocabulary_size: 3122176
dimension: 128
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
feature_descriptor {
name: "F1"
table_id: 0
input_shape: [ 768 ]
}
feature_descriptor {
name: "F2"
table_id: 1
input_shape: [ 512 ]
}
mode: TRAINING
num_hosts: 16
num_tensor_cores: 128
pipeline_execution_with_tensor_core: true
)pb";
tpu::TPUEmbeddingConfiguration tpu_embedding_config;
TF_ASSERT_OK(ParseTextProto(config_str, &tpu_embedding_config));
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.mutable_feature_descriptor(0)->set_table_id(2);
EXPECT_THAT(PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("Invalid table_id")));
}
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.mutable_feature_descriptor(0)->clear_input_shape();
EXPECT_THAT(
PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("The input_shape field cannot be empty")));
}
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.mutable_feature_descriptor(0)->set_input_shape(0, -5);
EXPECT_THAT(
PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("The input_shape dimension sizes must all")));
}
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.mutable_feature_descriptor(1)->set_table_id(0);
EXPECT_THAT(PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr(
"No feature_descriptor fields found for table: T1")));
}
}
}
} |
1,482 | cpp | tensorflow/tensorflow | sparse_core_layout | tensorflow/core/tpu/kernels/sparse_core_layout.cc | tensorflow/core/tpu/kernels/sparse_core_layout_test.cc | #ifndef TENSORFLOW_CORE_TPU_KERNELS_SPARSE_CORE_LAYOUT_H_
#define TENSORFLOW_CORE_TPU_KERNELS_SPARSE_CORE_LAYOUT_H_
#include <cstdint>
#include <limits>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/btree_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/tpu/kernels/sparse_core_layout.pb.h"
namespace tensorflow::tpu {
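// Greedily packs embedding tables into "stacks": tables with the same padded
// width and group string are concatenated row-wise into one stacked table,
// subject to optional activation-memory, variable-shard, row-count and
// table-count limits. GetLayouts() then emits one SparseCoreTableLayout per
// original table describing its placement within its stack.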
class SparseCoreLayoutStacker {
public:
explicit SparseCoreLayoutStacker(int num_partitions,
bool disable_table_stacking = false,
int sparse_cores_per_partition = 4);
void SetActivationMemoryBytesLimit(int64_t activation_mem_bytes_limit) {
CHECK(stacks_by_group_.empty()) << "must call before AddTable";
activation_mem_bytes_limit_ = activation_mem_bytes_limit;
}
void SetVariableShardBytesLimit(int64_t variable_shard_bytes_limit) {
CHECK(stacks_by_group_.empty()) << "must call before AddTable";
variable_shard_bytes_limit_ = variable_shard_bytes_limit;
}
void SetStackingEnabled(bool stacking_enabled) {
CHECK(stacks_by_group_.empty()) << "must call before AddTable";
stacking_enabled_ = stacking_enabled;
}
void SetStackingRowLimit(int64_t row_limit) {
CHECK(stacks_by_group_.empty()) << "must call before AddTable";
row_limit_ = row_limit;
}
void SetStackingTableLimit(int table_limit) {
CHECK(stacks_by_group_.empty()) << "must call before AddTable";
table_limit_ = table_limit;
}
absl::Status AddTable(tsl::StringPiece table_name, int64_t table_height,
int64_t table_width, tsl::StringPiece group,
int64_t output_samples);
absl::StatusOr<SparseCoreTableLayouts> GetLayouts();
private:
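  // A stack being assembled: running height/byte totals plus the layouts of
  // the tables appended so far; their stacked_table_name and total row count
  // are finalized in GetLayouts().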
struct TableStack {
std::string temporary_name;
int64_t padded_width = 0;
int64_t unsharded_height = 0;
int64_t total_activation_mem_bytes = 0;
int64_t total_variable_shard_bytes = 0;
std::vector<SparseCoreTableLayout> incomplete_tables;
};
const int num_partitions_;
const int sparse_cores_per_partition_;
const int num_sparse_cores_;
bool stacking_enabled_ = true;
int64_t activation_mem_bytes_limit_ = 0;
int64_t variable_shard_bytes_limit_ = 0;
int64_t row_limit_ = (1LL << 31) - 1;
int table_limit_ = std::numeric_limits<int>::max();
absl::btree_map<std::pair<int64_t, std::string>, std::vector<TableStack>>
stacks_by_group_;
};
}
#endif
#include "tensorflow/core/tpu/kernels/sparse_core_layout.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/core/tpu/kernels/sparse_core_layout.pb.h"
#include "tsl/platform/stringpiece.h"
namespace tensorflow {
ABSL_ATTRIBUTE_WEAK bool GetDisableTableStacking(bool disable_table_stacking) {
bool should_disable_stacking = false;
XlaSparseCoreFlags *sparse_core_flags = GetXlaSparseCoreFlags();
should_disable_stacking =
sparse_core_flags->tf_xla_sparse_core_disable_table_stacking;
return should_disable_stacking || disable_table_stacking;
}
ABSL_ATTRIBUTE_WEAK int64_t GetXlaSparseCoreStackingMemLimit() {
XlaSparseCoreFlags *sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_stacking_mem_limit_bytes;
}
ABSL_ATTRIBUTE_WEAK int64_t GetXlaSparseCoreStackingTableShardLimit() {
XlaSparseCoreFlags *sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_stacking_table_shard_limit_bytes;
}
namespace tpu {
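// Rounds n up to the nearest multiple of factor (returns n unchanged if it is
// already a multiple).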
static int64_t NextLargestMultiple(int64_t n, int64_t factor) {
int64_t extra = n % factor;
if (extra == 0) return n;
return n + factor - extra;
}
SparseCoreLayoutStacker::SparseCoreLayoutStacker(int num_partitions,
bool disable_table_stacking,
int sparse_cores_per_partition)
: num_partitions_(num_partitions),
sparse_cores_per_partition_(sparse_cores_per_partition),
num_sparse_cores_(num_partitions_ * sparse_cores_per_partition_),
stacking_enabled_(!GetDisableTableStacking(disable_table_stacking)),
activation_mem_bytes_limit_(GetXlaSparseCoreStackingMemLimit()),
variable_shard_bytes_limit_(GetXlaSparseCoreStackingTableShardLimit()) {}
absl::Status SparseCoreLayoutStacker::AddTable(tsl::StringPiece table_name,
int64_t table_height,
int64_t table_width,
tsl::StringPiece group,
int64_t output_samples) {
if (stacks_by_group_.empty()) {
VLOG(1) << "Stacking parameters: stacking_enabled_ = " << stacking_enabled_
<< ", activation_mem_bytes_limit_ = " << activation_mem_bytes_limit_
<< ", variable_shard_bytes_limit_ = " << variable_shard_bytes_limit_
<< ", row_limit_ = " << row_limit_
<< ", table_limit_ = " << table_limit_;
}
VLOG(2) << "Table " << table_name << ":";
int64_t samples_per_sparse_core =
output_samples / sparse_cores_per_partition_;
int64_t padded_width = NextLargestMultiple(table_width, 8);
int64_t padded_height =
NextLargestMultiple(table_height, num_sparse_cores_ * 8);
VLOG(2) << " Original size: " << table_height << "x" << table_width
<< " padded size: " << padded_height << "x" << padded_width;
int64_t activation_mem_bytes =
sizeof(float) * padded_width * samples_per_sparse_core;
int64_t variable_shard_bytes =
sizeof(float) * padded_width * padded_height / num_partitions_;
VLOG(2) << " activation mem = " << activation_mem_bytes
<< ", variable shard bytes = " << variable_shard_bytes;
std::vector<TableStack> &candidate_stacks =
stacks_by_group_[std::make_pair(padded_width, std::string(group))];
TableStack *stack = nullptr;
if (stacking_enabled_) {
for (TableStack &ts : candidate_stacks) {
if (ts.incomplete_tables.size() >= table_limit_) continue;
if (activation_mem_bytes_limit_ != 0 &&
ts.total_activation_mem_bytes + activation_mem_bytes >=
activation_mem_bytes_limit_) {
continue;
}
if (variable_shard_bytes_limit_ != 0 &&
ts.total_variable_shard_bytes + variable_shard_bytes >=
variable_shard_bytes_limit_) {
continue;
}
if (row_limit_ != 0 &&
ts.unsharded_height + padded_height >= row_limit_) {
continue;
}
stack = &ts;
break;
}
}
if (stack == nullptr) {
candidate_stacks.emplace_back();
stack = &candidate_stacks.back();
stack->padded_width = padded_width;
stack->temporary_name = absl::Substitute("w$0_i$1_$2", padded_width,
candidate_stacks.size(), group);
}
stack->incomplete_tables.emplace_back();
SparseCoreTableLayout &layout = stack->incomplete_tables.back();
layout.set_table_name(std::string(table_name));
layout.set_num_sparse_cores(num_sparse_cores_);
layout.set_num_partitions(num_partitions_);
layout.add_unsharded_shape(table_height);
layout.add_unsharded_shape(table_width);
layout.add_unsharded_padded_shape(padded_height);
layout.add_unsharded_padded_shape(padded_width);
layout.set_sparse_core_shard_row_offset(stack->unsharded_height /
num_sparse_cores_);
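  // Each successive table in the stack is rotated by one partition's worth of
  // SparseCore shards (table index * sparse_cores_per_partition, modulo
  // num_sparse_cores), presumably so the leading rows of the stacked tables
  // do not all land on the same shard.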
layout.set_sparse_core_shard_rotation(((stack->incomplete_tables.size() - 1) *
num_sparse_cores_ / num_partitions_) %
num_sparse_cores_);
stack->unsharded_height += padded_height;
stack->total_variable_shard_bytes += variable_shard_bytes;
stack->total_activation_mem_bytes += activation_mem_bytes;
return absl::OkStatus();
}
absl::StatusOr<SparseCoreTableLayouts> SparseCoreLayoutStacker::GetLayouts() {
SparseCoreTableLayouts layouts;
for (const auto &[key, stacks] : stacks_by_group_) {
VLOG(1) << "Stack group: padded width " << key.first
<< ", name = " << key.second;
for (const TableStack &stack : stacks) {
VLOG(1) << " Stack " << stack.temporary_name
<< ": unsharded_height = " << stack.unsharded_height
<< ", total_activation_mem_bytes = "
<< stack.total_activation_mem_bytes
<< ", total_variable_shard_bytes = "
<< stack.total_variable_shard_bytes;
std::string stacked_table_name;
for (const SparseCoreTableLayout &incomplete_layout :
stack.incomplete_tables) {
if (!stacked_table_name.empty()) stacked_table_name += "_";
absl::StrAppend(&stacked_table_name, incomplete_layout.table_name());
}
for (const SparseCoreTableLayout &incomplete_layout :
stack.incomplete_tables) {
SparseCoreTableLayout *out_layout = layouts.add_tables();
*out_layout = incomplete_layout;
out_layout->set_stacked_table_name(stacked_table_name);
VLOG(1) << " Contains " << out_layout->table_name();
out_layout->set_total_rows_per_sparse_core_shard(
stack.unsharded_height / num_sparse_cores_);
}
}
}
return layouts;
}
}
} | #include "tensorflow/core/tpu/kernels/sparse_core_layout.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/tpu/kernels/sparse_core_layout.pb.h"
namespace tensorflow {
namespace tpu {
namespace {
using ::testing::EqualsProto;
using ::testing::proto::Partially;
using ::testing::status::IsOkAndHolds;
TEST(SparseCoreLayoutStacker, StacksTwoTablesAndPads) {
SparseCoreLayoutStacker stacker(2);
ASSERT_OK(stacker.AddTable("table1", 100, 6, "stack1", 10));
ASSERT_OK(stacker.AddTable("table2", 50, 5, "stack1", 10));
EXPECT_THAT(stacker.GetLayouts(), IsOkAndHolds(EqualsProto(R"pb(
tables {
table_name: 'table1'
stacked_table_name: 'table1_table2'
num_sparse_cores: 8
num_partitions: 2
total_rows_per_sparse_core_shard: 24 # = (128 + 64) / 8
unsharded_shape: [ 100, 6 ]
unsharded_padded_shape: [ 128, 8 ]
sparse_core_shard_row_offset: 0
sparse_core_shard_rotation: 0
}
tables {
table_name: 'table2'
stacked_table_name: 'table1_table2'
num_sparse_cores: 8
num_partitions: 2
total_rows_per_sparse_core_shard: 24
unsharded_shape: [ 50, 5 ]
unsharded_padded_shape: [ 64, 8 ]
sparse_core_shard_row_offset: 16 # = 128/8
sparse_core_shard_rotation: 4
}
)pb")));
}
TEST(SparseCoreLayoutStacker, RespectsDisableStacking) {
SparseCoreLayoutStacker stacker(2);
stacker.SetStackingEnabled(false);
ASSERT_OK(stacker.AddTable("table1", 100, 6, "stack1", 10));
ASSERT_OK(stacker.AddTable("table2", 50, 5, "stack1", 10));
EXPECT_THAT(stacker.GetLayouts(), IsOkAndHolds(EqualsProto(R"pb(
tables {
table_name: 'table1'
stacked_table_name: 'table1'
num_sparse_cores: 8
num_partitions: 2
total_rows_per_sparse_core_shard: 16 # = 128 / 8
unsharded_shape: [ 100, 6 ]
unsharded_padded_shape: [ 128, 8 ]
sparse_core_shard_row_offset: 0
sparse_core_shard_rotation: 0
}
tables {
table_name: 'table2'
stacked_table_name: 'table2'
num_sparse_cores: 8
num_partitions: 2
total_rows_per_sparse_core_shard: 8 # = 64/8
unsharded_shape: [ 50, 5 ]
unsharded_padded_shape: [ 64, 8 ]
sparse_core_shard_row_offset: 0
sparse_core_shard_rotation: 0
}
)pb")));
}
TEST(SparseCoreLayoutStacker, RespectsActivationMemLimit) {
SparseCoreLayoutStacker stacker(2);
stacker.SetActivationMemoryBytesLimit(16384 + 1);
ASSERT_OK(stacker.AddTable("table1", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table2", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table3", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table4", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table5", 128, 8, "stack1", 1024));
EXPECT_THAT(
stacker.GetLayouts(), IsOkAndHolds(Partially(EqualsProto(R"pb(
tables { table_name: 'table1' stacked_table_name: 'table1_table2' }
tables { table_name: 'table2' stacked_table_name: 'table1_table2' }
tables { table_name: 'table3' stacked_table_name: 'table3_table4' }
tables { table_name: 'table4' stacked_table_name: 'table3_table4' }
tables { table_name: 'table5' stacked_table_name: 'table5' }
)pb"))));
}
TEST(SparseCoreLayoutStacker, RespectsVariableShardLimit) {
SparseCoreLayoutStacker stacker(2);
stacker.SetVariableShardBytesLimit(4096 + 1);
ASSERT_OK(stacker.AddTable("table1", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table2", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table3", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table4", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table5", 128, 8, "stack1", 1024));
EXPECT_THAT(
stacker.GetLayouts(), IsOkAndHolds(Partially(EqualsProto(R"pb(
tables { table_name: 'table1' stacked_table_name: 'table1_table2' }
tables { table_name: 'table2' stacked_table_name: 'table1_table2' }
tables { table_name: 'table3' stacked_table_name: 'table3_table4' }
tables { table_name: 'table4' stacked_table_name: 'table3_table4' }
tables { table_name: 'table5' stacked_table_name: 'table5' }
)pb"))));
}
TEST(SparseCoreLayoutStacker, RespectsRowLimit) {
SparseCoreLayoutStacker stacker(2);
stacker.SetActivationMemoryBytesLimit(0);
stacker.SetVariableShardBytesLimit(0);
ASSERT_OK(stacker.AddTable("table1", 1 << 29, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table2", 1 << 29, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table3", 1 << 29, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table4", 1 << 29, 8, "stack1", 1024));
EXPECT_THAT(stacker.GetLayouts(), IsOkAndHolds(Partially(EqualsProto(R"pb(
tables {
table_name: 'table1'
stacked_table_name: 'table1_table2_table3'
}
tables {
table_name: 'table2'
stacked_table_name: 'table1_table2_table3'
}
tables {
table_name: 'table3'
stacked_table_name: 'table1_table2_table3'
}
tables { table_name: 'table4' stacked_table_name: 'table4' }
)pb"))));
}
TEST(SparseCoreLayoutStacker, RespectsTableLimit) {
SparseCoreLayoutStacker stacker(2);
stacker.SetActivationMemoryBytesLimit(0);
stacker.SetVariableShardBytesLimit(0);
stacker.SetStackingTableLimit(2);
ASSERT_OK(stacker.AddTable("table1", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table2", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table3", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table4", 128, 8, "stack1", 1024));
EXPECT_THAT(
stacker.GetLayouts(), IsOkAndHolds(Partially(EqualsProto(R"pb(
tables { table_name: 'table1' stacked_table_name: 'table1_table2' }
tables { table_name: 'table2' stacked_table_name: 'table1_table2' }
tables { table_name: 'table3' stacked_table_name: 'table3_table4' }
tables { table_name: 'table4' stacked_table_name: 'table3_table4' }
)pb"))));
}
}
}
} |
1,483 | cpp | tensorflow/tensorflow | sparse_core_ops_utils | tensorflow/core/tpu/kernels/sparse_core_ops_utils.cc | tensorflow/core/tpu/kernels/sparse_core_ops_utils_test.cc | #ifndef TENSORFLOW_CORE_TPU_KERNELS_SPARSE_CORE_OPS_UTILS_H_
#define TENSORFLOW_CORE_TPU_KERNELS_SPARSE_CORE_OPS_UTILS_H_
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
const int32_t kXlaPadValue = std::numeric_limits<int32_t>::max();
std::vector<int> ConvertBinarySplitsToBucketSplits(int64 split,
int max_division_level);
int64 ConvertBucketSplitsToBinarySplits(std::vector<int> bucket_splits,
int max_division_level);
Status ValidateInputCombiner(const std::string& combiner);
std::function<float(float)> GetCombinerScaleContributionFunction(
absl::string_view combiner);
std::function<float(float)> GetCombinerScaleTransformFunction(
absl::string_view combiner);
std::vector<std::vector<std::string>> GetTableStacks(
const std::vector<int64_t>& table_height,
const std::vector<int64_t>& table_width,
const std::vector<int64_t>& table_num_samples,
const std::vector<int64_t>& table_group,
const std::vector<std::string>& table_names, int64_t num_tpu_chips);
int GetMinibatchMaxDivisionLevel();
bool GetDisableTableStacking();
int64_t GetXlaSparseCoreStackingMemLimit();
int64_t GetXlaSparseCoreStackingTableShardLimit();
Status GetMaxIdsAndUniquesExternal(const std::string& program_key,
const std::string& table_name,
int64_t num_samples_per_sparse_core,
int64_t feature_width,
int64_t* max_ids_per_partition,
int64_t* max_unique_ids_per_partition);
}
#endif
#include "tensorflow/core/tpu/kernels/sparse_core_ops_utils.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <string>
#include <tuple>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/jit/flags.h"
#include "xla/stream_executor/tpu/status_helper.h"
#include "xla/stream_executor/tpu/tpu_api.h"
#include "xla/stream_executor/tpu/tpu_ops_c_api.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
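// A minibatch split configuration is encoded as a bitmask over the nodes of a
// complete binary tree in level order (bit i corresponds to node i + 1 in
// heap numbering). Each set bit marks the midpoint of the bucket range that
// node covers, out of 2^max_division_level buckets in total; e.g. with
// max_division_level = 6 (64 buckets), bit 0 -> split at 32, bit 1 -> 16,
// bit 2 -> 48. The two functions below convert between this bitmask and the
// sorted list of bucket split points.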
std::vector<int> ConvertBinarySplitsToBucketSplits(int64 split,
int max_division_level) {
std::vector<int> bucket_splits;
uint32 current_index = 0;
while (split > 0) {
if (split % 2 == 1) {
int split_level = absl::bit_width(current_index + 1) - 1;
int split_offset = current_index - (1 << split_level) + 1;
int split_size = 1 << (max_division_level - 1 - split_level);
bucket_splits.push_back(split_size + split_offset * split_size * 2);
}
split >>= 1;
current_index += 1;
}
absl::c_sort(bucket_splits);
return bucket_splits;
}
int64 ConvertBucketSplitsToBinarySplits(std::vector<int> bucket_splits,
int max_division_level) {
int64 binary_splits = 0;
for (auto& bucket_split : bucket_splits) {
int split_level = max_division_level - 1;
while (bucket_split > 0 && bucket_split % 2 == 0) {
--split_level;
bucket_split = bucket_split >> 1;
}
binary_splits |= (1LL << ((1 << split_level) - 1 + bucket_split / 2));
}
return binary_splits;
}
Status ValidateInputCombiner(const std::string& combiner) {
if (combiner != "sum" && combiner != "mean" && combiner != "sqrtn") {
return absl::InvalidArgumentError(
"Invalid combiner: only \"sum\", \"mean\", and "
"\"sqrtn\" are supported.");
}
return absl::OkStatus();
}
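// For a given combiner, the "contribution" function says how much each gain
// value adds to a row's scale accumulator (1 for "sum", x for "mean", x^2 for
// "sqrtn"), and the "transform" function maps the accumulated total to the
// final scale factor (1, 1/x and 1/sqrt(x) respectively, with 0 mapped to 0).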
std::function<float(float)> GetCombinerScaleContributionFunction(
absl::string_view combiner) {
if (combiner == "sum") {
return [](float x) -> float { return 1.f; };
} else if (combiner == "mean") {
return [](float x) -> float { return x; };
} else {
return [](float x) -> float { return x * x; };
}
}
std::function<float(float)> GetCombinerScaleTransformFunction(
absl::string_view combiner) {
if (combiner == "sum") {
return [](float x) -> float { return 1; };
} else if (combiner == "mean") {
return [](float x) -> float { return x == 0.0f ? 0.0f : 1.0 / x; };
} else {
return
[](float x) -> float { return x == 0.0f ? 0.0f : 1.0 / std::sqrt(x); };
}
}
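// Looks up, through the SparseCore ops C API, the max_ids_per_partition and
// max_unique_ids_per_partition for `table_name` in the compiled program
// identified by `program_key`.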
Status GetMaxIdsAndUniquesExternal(const std::string& program_key,
const std::string& table_name,
int64_t num_samples_per_sparse_core,
int64_t feature_width,
int64_t* max_ids_per_partition,
int64_t* max_unique_ids_per_partition) {
SparseCore_GetMaxIdsAndUniques_Params params;
params.program_key = program_key.c_str();
params.table_name = table_name.c_str();
params.num_samples_per_sparse_core = num_samples_per_sparse_core;
params.feature_width = feature_width;
StatusHelper status;
params.status = status.c_status;
stream_executor::tpu::OpsApiFn()->SparseCore_GetMaxIdsAndUniquesFn(¶ms);
*max_ids_per_partition = params.max_ids_per_partition;
*max_unique_ids_per_partition = params.max_unique_ids_per_partition;
return status.status();
}
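// Greedy host-side table stacking: tables are processed in name order,
// grouped by `table_group`, and appended to the first stack in their group
// that stays within the activation-memory and per-chip shard-size byte limits
// (a limit of 0 disables that check); otherwise a new stack is started.
// Returns the table names partitioned into stacks; with stacking disabled,
// every table becomes its own singleton stack.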
std::vector<std::vector<std::string>> GetTableStacks(
const std::vector<int64_t>& table_height,
const std::vector<int64_t>& table_width,
const std::vector<int64_t>& table_num_samples,
const std::vector<int64_t>& table_group,
const std::vector<std::string>& table_names, int64_t num_tpu_chips) {
if (GetDisableTableStacking()) {
std::vector<std::vector<std::string>> stacks(table_names.size());
for (int i = 0; i < table_names.size(); ++i) stacks[i] = {table_names[i]};
return stacks;
}
std::vector<std::tuple<int64_t, int64_t, int64_t, int64_t, std::string>>
table_data(table_height.size());
for (int i = 0; i < table_height.size(); ++i)
table_data[i] =
std::make_tuple(table_height[i], table_width[i], table_num_samples[i],
table_group[i], table_names[i]);
std::sort(table_data.begin(), table_data.end(), [](auto& lh, auto& rh) {
return std::get<4>(lh) < std::get<4>(rh);
});
absl::flat_hash_map<int64_t, std::vector<std::vector<std::string>>>
stacks_by_group;
absl::flat_hash_map<int64_t, std::vector<int64_t>> stacks_height_by_group;
absl::flat_hash_map<int64_t, std::vector<int64_t>> stacks_width_by_group;
absl::flat_hash_map<int64_t, std::vector<int64_t>> stacks_samples_by_group;
const int64_t mem_limit = GetXlaSparseCoreStackingMemLimit();
const int64_t table_shard_limit = GetXlaSparseCoreStackingTableShardLimit();
for (const auto& table : table_data) {
int64_t height;
int64_t width;
int64_t num_samples;
int64_t group;
std::string name;
std::tie(height, width, num_samples, group, name) = table;
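    // Convert per-chip samples to per-SparseCore samples; this assumes four
    // SparseCores per chip (matching the default sparse_cores_per_partition).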
num_samples /= 4;
int64_t stack_id = 0;
for (; stack_id < stacks_by_group[group].size(); ++stack_id)
if (((mem_limit == 0) ||
(sizeof(float) * width *
(num_samples + stacks_samples_by_group[group][stack_id]) <
mem_limit)) &&
((table_shard_limit == 0) ||
(sizeof(float) * (height + stacks_height_by_group[group][stack_id]) *
width / num_tpu_chips <
table_shard_limit)))
break;
if (stack_id == stacks_by_group[group].size()) {
stacks_by_group[group].resize(stacks_by_group[group].size() + 1);
stacks_height_by_group[group].push_back(0);
stacks_width_by_group[group].push_back(width);
stacks_samples_by_group[group].push_back(0);
}
stacks_by_group[group][stack_id].push_back(name);
stacks_height_by_group[group][stack_id] += height;
stacks_samples_by_group[group][stack_id] += num_samples;
}
std::vector<std::vector<std::string>> table_stacks;
for (const auto& [group, stacks] : stacks_by_group)
table_stacks.insert(table_stacks.end(), stacks.begin(), stacks.end());
return table_stacks;
}
ABSL_ATTRIBUTE_WEAK int GetMinibatchMaxDivisionLevel() {
XlaSparseCoreFlags* sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_minibatch_max_division_level;
}
ABSL_ATTRIBUTE_WEAK bool GetDisableTableStacking() {
XlaSparseCoreFlags* sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_disable_table_stacking;
}
ABSL_ATTRIBUTE_WEAK int64_t GetXlaSparseCoreStackingMemLimit() {
XlaSparseCoreFlags* sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_stacking_mem_limit_bytes;
}
ABSL_ATTRIBUTE_WEAK int64_t GetXlaSparseCoreStackingTableShardLimit() {
XlaSparseCoreFlags* sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_stacking_table_shard_limit_bytes;
}
} | #include "tensorflow/core/tpu/kernels/sparse_core_ops_utils.h"
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
TEST(ConvertSplitsAndBackTest, Split0) {
const int max_division_level = 6;
int64 original_split = 0;
std::vector<int> actual_buckets =
ConvertBinarySplitsToBucketSplits(original_split, max_division_level);
std::vector<int> expected_buckets = {};
int64 re_split =
ConvertBucketSplitsToBinarySplits(expected_buckets, max_division_level);
ASSERT_EQ(re_split, original_split);
}
TEST(ConvertSplitsAndBackTest, Split2) {
const int max_division_level = 6;
int64 original_split = 2;
std::vector<int> actual_buckets =
ConvertBinarySplitsToBucketSplits(original_split, max_division_level);
std::vector<int> expected_buckets = {16};
int64 re_split =
ConvertBucketSplitsToBinarySplits(expected_buckets, max_division_level);
ASSERT_EQ(re_split, original_split);
}
TEST(ConvertSplitsAndBackTest, Split3) {
const int max_division_level = 6;
int64 original_split = 3;
std::vector<int> actual_buckets =
ConvertBinarySplitsToBucketSplits(original_split, max_division_level);
std::vector<int> expected_buckets = {16, 32};
int64 re_split =
ConvertBucketSplitsToBinarySplits(expected_buckets, max_division_level);
ASSERT_EQ(re_split, original_split);
}
}
} |
1,484 | cpp | tensorflow/tensorflow | tensor_cord | tensorflow/core/kernels/tensor_cord.cc | tensorflow/core/kernels/tensor_cord_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_TENSOR_CORD_H_
#define TENSORFLOW_CORE_KERNELS_TENSOR_CORD_H_
#include <array>
#include <numeric>
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
namespace tensorflow {
typedef void (*CordRepReleaser)(void*);
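// A TensorCord is an append-only, reference-counted sequence of string_view
// chunks. Each chunk either borrows memory owned elsewhere (released through
// its CordRepReleaser when the last reference goes away) or, for sufficiently
// small views, stores a copy inline. Conversion to `string` concatenates all
// chunks.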
class TensorCord {
public:
static constexpr const char kTypeName[] = "tensorflow::TensorCord";
TensorCord() : chunks_() {}
~TensorCord();
TensorCord(absl::string_view view, CordRepReleaser releaser,
void* memory = nullptr)
: chunks_({new CordRep(view, releaser, memory)}) {}
TensorCord(absl::string_view view, Tensor* tensor)
: chunks_({NewCordRepFromTensor(view, tensor)}) {}
TensorCord(absl::string_view view, std::nullptr_t, void* memory) = delete;
TensorCord(absl::string_view view, std::nullptr_t) = delete;
TensorCord(const TensorCord& other);
TensorCord(TensorCord&& other) noexcept;
TensorCord& operator=(const TensorCord& other);
TensorCord& operator=(TensorCord&& other) noexcept;
void Append(const TensorCord& other);
void Append(absl::string_view view, CordRepReleaser releaser,
void* memory = nullptr);
void Append(absl::string_view view, Tensor* tensor);
void Append(absl::string_view view, std::nullptr_t, void* memory) = delete;
void Append(absl::string_view view, std::nullptr_t) = delete;
size_t size() const;
bool empty() const { return size() == 0; }
explicit operator string() const;
class ChunkIterator {
public:
using iterator_category = std::input_iterator_tag;
using value_type = absl::string_view;
using difference_type = ptrdiff_t;
using pointer = const value_type*;
using reference = value_type;
ChunkIterator& operator++();
ChunkIterator operator++(int) {
ChunkIterator tmp(*this);
operator++();
return tmp;
}
bool operator==(const ChunkIterator& other) const {
return (cord_ == other.cord_ && chunk_index_ == other.chunk_index_);
}
bool operator!=(const ChunkIterator& other) const {
return !(*this == other);
}
reference operator*() const {
assert(cord_ != nullptr);
return view_;
}
pointer operator->() const {
assert(cord_ != nullptr);
return &view_;
}
friend class TensorCord;
private:
explicit ChunkIterator(const TensorCord* cord, int chunk_index);
const TensorCord* const cord_;
int chunk_index_;
absl::string_view view_;
};
class ChunkRange {
public:
explicit ChunkRange(const TensorCord* cord) : cord_(cord) {}
ChunkIterator begin() const { return ChunkIterator(cord_, 0); }
ChunkIterator end() const {
return ChunkIterator(cord_, cord_->chunks_.size());
}
private:
const TensorCord* cord_;
};
ChunkRange Chunks() const { return ChunkRange(this); }
ChunkIterator chunk_begin() const { return ChunkIterator(this, 0); }
ChunkIterator chunk_end() const {
return ChunkIterator(this, chunks_.size());
}
static string TypeName() { return kTypeName; }
string DebugString() const {
return absl::StrCat("<TensorCord size=", size(), ">");
}
void Encode(VariantTensorData* data) const;
bool Decode(VariantTensorData data);
private:
void Cleanup();
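  // One chunk of the cord. Sufficiently small views are copied inline (the
  // first byte holds the length, the remaining bytes the data); larger views
  // keep a string_view into external memory plus the releaser/arg used to
  // free it when the reference count drops to zero.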
class CordRep : public core::RefCounted {
public:
CordRep(absl::string_view view, CordRepReleaser releaser,
void* arg = nullptr)
: is_inline_(false), rep_(view, releaser, arg) {}
explicit CordRep(absl::string_view view) : is_inline_(true), rep_(view) {}
~CordRep() override;
absl::string_view view() const {
if (is_inline_) {
return absl::string_view(
rep_.internal.data() + 1,
*reinterpret_cast<const uint8*>(rep_.internal.data()));
} else {
return rep_.external.view;
}
}
private:
friend class TensorCord;
struct ExternalRep {
absl::string_view view;
CordRepReleaser releaser;
void* arg;
ExternalRep(absl::string_view view_, CordRepReleaser releaser_,
void* arg_)
: view(view_), releaser(releaser_), arg(arg_) {}
};
static constexpr int kMaxInlineSize = sizeof(ExternalRep) - 1;
static_assert(kMaxInlineSize < 255,
"Cannot store size of InlineRep in a single byte.");
using InlineRep = std::array<char, sizeof(ExternalRep)>;
const bool is_inline_;
const union _rep_union {
InlineRep internal;
ExternalRep external;
_rep_union(absl::string_view view, CordRepReleaser releaser, void* arg)
: external(view, releaser, arg) {}
explicit _rep_union(absl::string_view view) {
DCHECK_LT(view.size(), kMaxInlineSize);
*reinterpret_cast<uint8*>(internal.data()) = view.size();
std::memcpy(static_cast<char*>(internal.data() + 1), view.data(),
view.size());
}
} rep_;
};
static TensorBuffer* TensorBufWithRef(Tensor* tensor);
static void TensorBufReleaser(void* tensor_buffer);
static void StringReleaser(void* str_ptr);
static CordRep* NewCordRepFromTensor(absl::string_view view, Tensor* tensor);
absl::InlinedVector<CordRep*, 2> chunks_;
};
inline TensorCord::TensorCord(const TensorCord& other)
: chunks_(other.chunks_) {
for (auto* rep : chunks_) {
rep->Ref();
}
}
inline TensorCord::TensorCord(TensorCord&& other) noexcept
: chunks_(std::move(other.chunks_)) {
other.chunks_.clear();
}
inline TensorCord& TensorCord::operator=(const TensorCord& other) {
Cleanup();
chunks_ = other.chunks_;
for (auto* rep : chunks_) {
rep->Ref();
}
return *this;
}
inline TensorCord& TensorCord::operator=(TensorCord&& other) noexcept {
Cleanup();
std::swap(chunks_, other.chunks_);
return *this;
}
inline void TensorCord::Append(const TensorCord& other) {
for (auto* rep : other.chunks_) {
chunks_.push_back(rep);
rep->Ref();
}
}
inline void TensorCord::Append(absl::string_view view, CordRepReleaser releaser,
void* memory) {
chunks_.push_back(new CordRep(view, releaser, memory));
}
inline void TensorCord::Append(absl::string_view view, Tensor* tensor) {
chunks_.push_back(NewCordRepFromTensor(view, tensor));
}
inline size_t TensorCord::size() const {
return (chunks_.empty())
? 0
: std::accumulate(chunk_begin(), chunk_end(), 0,
[](size_t acc, absl::string_view b) {
return acc + b.size();
});
}
inline TensorCord::ChunkIterator& TensorCord::ChunkIterator::operator++() {
assert(cord_ != nullptr);
assert(chunk_index_ < cord_->chunks_.size());
chunk_index_ += 1;
if (chunk_index_ != cord_->chunks_.size()) {
view_ = cord_->chunks_[chunk_index_]->view();
}
return *this;
}
inline TensorCord::ChunkIterator::ChunkIterator(const TensorCord* cord,
int index)
: cord_(cord), chunk_index_(index) {
if (index < cord_->chunks_.size()) {
view_ = cord_->chunks_[index]->view();
}
}
inline TensorCord::CordRep* TensorCord::NewCordRepFromTensor(
absl::string_view view, Tensor* tensor) {
if (view.size() <= TensorCord::CordRep::kMaxInlineSize) {
return new CordRep(view);
} else {
return new CordRep(view, &TensorBufReleaser, TensorBufWithRef(tensor));
}
}
inline void TensorCord::Cleanup() {
if (chunks_.empty()) return;
for (auto* rep : chunks_) {
rep->Unref();
}
chunks_.clear();
}
}
#endif
#include "tensorflow/core/kernels/tensor_cord.h"
#include <cstring>
#include "tensorflow/core/framework/variant.h"
namespace tensorflow {
static_assert(Variant::CanInlineType<TensorCord>(),
"TensorCord should be inlined into Variants");
TensorCord::CordRep::~CordRep() {
if (!is_inline_ && rep_.external.releaser) {
rep_.external.releaser(rep_.external.arg);
}
}
TensorCord::~TensorCord() { Cleanup(); }
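// Variant support: Encode flattens every chunk into the metadata string;
// Decode re-materializes the cord as a single chunk backed by a heap string
// owned by the cord (freed via StringReleaser).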
void TensorCord::Encode(VariantTensorData* data) const {
data->metadata_string().clear();
for (auto rep : Chunks()) {
data->metadata_string().append(rep.data(), rep.size());
}
}
bool TensorCord::Decode(VariantTensorData data) {
auto* str = new string(std::move(data.metadata_string()));
Cleanup();
chunks_.push_back(new CordRep(absl::string_view(*str), &StringReleaser, str));
return true;
}
TensorBuffer* TensorCord::TensorBufWithRef(Tensor* tensor) {
TensorBuffer* buf = tensor->buf_;
buf->Ref();
return buf;
}
void TensorCord::TensorBufReleaser(void* tensor_buffer) {
static_cast<TensorBuffer*>(tensor_buffer)->Unref();
}
void TensorCord::StringReleaser(void* str_ptr) {
delete static_cast<string*>(str_ptr);
}
namespace {
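// SFINAE helper: when the string implementation exposes __resize_default_init
// (as libc++ does), grow the output without zero-initializing it, since the
// bytes are overwritten immediately afterwards; otherwise fall back to
// plain resize().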
template <typename string_type, typename = void>
struct ResizeUninitializedTraits {
using HasMember = std::false_type;
static void Resize(string_type* s, size_t new_size) { s->resize(new_size); }
};
template <typename string_type>
struct ResizeUninitializedTraits<
string_type, absl::void_t<decltype(std::declval<string_type&>()
.__resize_default_init(237))> > {
using HasMember = std::true_type;
static void Resize(string_type* s, size_t new_size) {
s->__resize_default_init(new_size);
}
};
static inline void STLStringResizeUninitialized(string* s, size_t new_size) {
ResizeUninitializedTraits<string>::Resize(s, new_size);
}
}
TensorCord::operator string() const {
string out;
STLStringResizeUninitialized(&out, size());
char* data = const_cast<char*>(out.data());
for (auto* rep : chunks_) {
auto view = rep->view();
memcpy(data, view.data(), view.size());
data += view.size();
}
DCHECK_EQ(data - out.data(), size());
return out;
}
} | #include "tensorflow/core/kernels/tensor_cord.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/platform/cord.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
void DoNothingReleaser(void*) {}
TEST(TensorCordTest, Empty) {
TensorCord tc;
EXPECT_EQ(tc.size(), 0);
EXPECT_EQ(tc.chunk_begin(), tc.chunk_end());
auto chunks = tc.Chunks();
EXPECT_EQ(chunks.begin(), chunks.end());
}
TEST(TensorCordTest, ViewOfValue) {
TensorCord tc("abc", &DoNothingReleaser, nullptr);
EXPECT_EQ(*tc.chunk_begin(), "abc");
auto it = tc.chunk_begin();
EXPECT_EQ(*it, "abc");
++it;
EXPECT_EQ(it, tc.chunk_end());
}
TEST(TensorCordTest, Chunks) {
TensorCord tc("abc", &DoNothingReleaser, nullptr);
int counter = 0;
for (auto string_piece : tc.Chunks()) {
EXPECT_EQ(string_piece, "abc");
++counter;
}
EXPECT_EQ(counter, 1);
}
template <typename T>
CordRepReleaser CreateThunkFor(const T& fn) {
return [](void* ptr) { (*static_cast<T*>(ptr))(); };
}
TEST(TensorCordTest, Copy) {
int cleaned = 0;
auto cleaner = [&cleaned]() { ++cleaned; };
auto thunk = CreateThunkFor(cleaner);
TensorCord tc_copy;
string a = "abc";
{
TensorCord tc(a, thunk, &cleaner);
tc_copy = tc;
}
auto it = tc_copy.chunk_begin();
EXPECT_EQ(*it, "abc");
++it;
EXPECT_EQ(it, tc_copy.chunk_end());
EXPECT_EQ(cleaned, 0);
tc_copy = TensorCord();
EXPECT_EQ(cleaned, 1);
}
TEST(TensorCordTest, AppendCord) {
int cleaned_0 = 0;
int cleaned_1 = 0;
auto cleaner_0 = [&cleaned_0]() { ++cleaned_0; };
auto cleaner_1 = [&cleaned_1]() { ++cleaned_1; };
auto thunk_0 = CreateThunkFor(cleaner_0);
auto thunk_1 = CreateThunkFor(cleaner_1);
TensorCord tc_0("abc", thunk_0, &cleaner_0);
TensorCord tc_1("cba", thunk_1, &cleaner_1);
tc_0.Append(tc_1);
EXPECT_EQ(string(tc_0), "abccba");
auto it = tc_0.chunk_begin();
EXPECT_EQ(*it, "abc");
++it;
EXPECT_EQ(*it, "cba");
++it;
EXPECT_EQ(it, tc_0.chunk_end());
tc_1 = TensorCord();
EXPECT_EQ(cleaned_0, 0);
EXPECT_EQ(cleaned_1, 0);
tc_0 = TensorCord();
EXPECT_EQ(cleaned_0, 1);
EXPECT_EQ(cleaned_1, 1);
}
TEST(TensorCordTest, AppendView) {
int cleaned_0 = 0;
int cleaned_1 = 0;
auto cleaner_0 = [&cleaned_0]() { ++cleaned_0; };
auto cleaner_1 = [&cleaned_1]() { ++cleaned_1; };
auto thunk_0 = CreateThunkFor(cleaner_0);
auto thunk_1 = CreateThunkFor(cleaner_1);
TensorCord tc_0("abc", thunk_0, &cleaner_0);
tc_0.Append("cba", thunk_1, &cleaner_1);
EXPECT_EQ(string(tc_0), "abccba");
auto it = tc_0.chunk_begin();
EXPECT_EQ(*it, "abc");
++it;
EXPECT_EQ(*it, "cba");
++it;
EXPECT_EQ(it, tc_0.chunk_end());
EXPECT_EQ(cleaned_0, 0);
EXPECT_EQ(cleaned_1, 0);
tc_0 = TensorCord();
EXPECT_EQ(cleaned_0, 1);
EXPECT_EQ(cleaned_1, 1);
}
TEST(TensorCordTest, Move) {
int cleaned = 0;
auto cleaner = [&cleaned]() { ++cleaned; };
auto thunk = CreateThunkFor(cleaner);
TensorCord tc_copy;
string a = "abc";
{
TensorCord tc(a, thunk, &cleaner);
tc_copy = std::move(tc);
}
EXPECT_EQ(tc_copy.size(), 3);
auto it = tc_copy.chunk_begin();
EXPECT_EQ(*it, "abc");
++it;
EXPECT_EQ(it, tc_copy.chunk_end());
EXPECT_EQ(cleaned, 0);
tc_copy = TensorCord();
EXPECT_EQ(tc_copy.size(), 0);
EXPECT_EQ(cleaned, 1);
}
TEST(TensorCordTest, CopyConstructor) {
int cleaned = 0;
auto cleaner = [&cleaned]() { ++cleaned; };
auto thunk = CreateThunkFor(cleaner);
string a = "abc";
TensorCord tc(a, thunk, &cleaner);
TensorCord tc_copy(tc);
EXPECT_EQ(tc.size(), 3);
EXPECT_EQ(tc_copy.size(), 3);
auto it = tc_copy.chunk_begin();
EXPECT_EQ(*it, "abc");
++it;
EXPECT_EQ(it, tc_copy.chunk_end());
EXPECT_EQ(cleaned, 0);
tc = TensorCord();
EXPECT_EQ(cleaned, 0);
tc_copy = TensorCord();
EXPECT_EQ(cleaned, 1);
}
TEST(TensorCordTest, MoveConstructor) {
int cleaned = 0;
auto cleaner = [&cleaned]() { ++cleaned; };
auto thunk = CreateThunkFor(cleaner);
string a = "abc";
TensorCord tc(a, thunk, &cleaner);
TensorCord tc_copy(std::move(tc));
EXPECT_EQ(tc_copy.size(), 3);
auto it = tc_copy.chunk_begin();
EXPECT_EQ(*it, "abc");
++it;
EXPECT_EQ(it, tc_copy.chunk_end());
EXPECT_EQ(cleaned, 0);
tc_copy = TensorCord();
EXPECT_EQ(cleaned, 1);
}
#ifdef PLATFORM_GOOGLE
void TensorCopyFromTensorBenchmark(benchmark::State& state, int num_elem,
int string_size) {
Tensor strings(DT_STRING, {num_elem});
auto t = strings.flat<tstring>();
for (int i = 0; i < num_elem; ++i) {
t(i).insert(0, string_size, 'a');
}
for (auto _ : state) {
benchmark::DoNotOptimize(tensor::DeepCopy(strings));
}
}
void TensorCordFromTensorBenchmark(benchmark::State& state, int num_elem,
int string_size) {
Tensor strings(DT_STRING, {num_elem});
auto t = strings.flat<tstring>();
for (int i = 0; i < num_elem; ++i) {
t(i).insert(0, string_size, 'a');
}
for (auto _ : state) {
Tensor copy(DT_VARIANT, {num_elem});
auto t_copy = copy.flat<Variant>();
for (int i = 0; i < num_elem; ++i) {
t_copy(i) = TensorCord(t(i), &strings);
}
}
}
void CordReleaser(void* cord_ptr) { delete static_cast<absl::Cord*>(cord_ptr); }
void TensorCordFromAbslCordBenchmark(benchmark::State& state, int num_elem,
int string_size) {
std::vector<absl::Cord> cords(num_elem);
for (int i = 0; i < num_elem; ++i) {
string s(string_size, 'a');
cords[i] = s;
}
for (auto _ : state) {
Tensor copy(DT_VARIANT, {num_elem});
auto t_copy = copy.flat<Variant>();
for (int i = 0; i < num_elem; ++i) {
auto my_cord = new absl::Cord(cords[i]);
t_copy(i) = TensorCord(*my_cord->chunk_begin(), CordReleaser, my_cord);
}
}
}
#define CreateBM(NUM_ELEM, STRING_SIZE) \
void BM_TensorCopyFromTensor_NumElem_##NUM_ELEM##_StringSize_##STRING_SIZE( \
benchmark::State& state) { \
TensorCopyFromTensorBenchmark(state, NUM_ELEM, STRING_SIZE); \
} \
BENCHMARK( \
BM_TensorCopyFromTensor_NumElem_##NUM_ELEM##_StringSize_##STRING_SIZE); \
void BM_TensorCordFromTensor_NumElem_##NUM_ELEM##_StringSize_##STRING_SIZE( \
benchmark::State& state) { \
TensorCordFromTensorBenchmark(state, NUM_ELEM, STRING_SIZE); \
} \
BENCHMARK( \
BM_TensorCordFromTensor_NumElem_##NUM_ELEM##_StringSize_##STRING_SIZE); \
void \
BM_TensorCordFromAbslCord_NumElem_##NUM_ELEM##_StringSize_##STRING_SIZE( \
benchmark::State& state) { \
TensorCordFromAbslCordBenchmark(state, NUM_ELEM, STRING_SIZE); \
} \
BENCHMARK( \
BM_TensorCordFromAbslCord_NumElem_##NUM_ELEM##_StringSize_##STRING_SIZE);
#define CreateStringBMs(NUM_ELEM) \
CreateBM(NUM_ELEM, 16); \
CreateBM(NUM_ELEM, 32); \
CreateBM(NUM_ELEM, 128); \
CreateBM(NUM_ELEM, 1024); \
CreateBM(NUM_ELEM, 4096);
CreateStringBMs(1);
CreateStringBMs(16);
CreateStringBMs(32);
CreateStringBMs(64);
CreateStringBMs(128);
#endif
}
} |
1,485 | cpp | tensorflow/tensorflow | random_poisson_op | tensorflow/core/kernels/random_poisson_op.cc | tensorflow/core/kernels/random_poisson_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_RANDOM_POISSON_OP_H_
#define TENSORFLOW_CORE_KERNELS_RANDOM_POISSON_OP_H_
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/lib/random/simple_philox.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T, typename U>
struct PoissonFunctor {
void operator()(OpKernelContext* ctx, const Device& d, const T* rate_flat,
int64_t num_rate, int64_t num_samples,
const random::PhiloxRandom& rng, U* samples_flat);
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/random_poisson_op.h"
#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/util/guarded_philox_random.h"
#include "tensorflow/core/util/work_sharder.h"
#if EIGEN_COMP_GNUC && __cplusplus > 199711L
#define DISABLE_FLOAT_EQUALITY_WARNING \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")
#define ENABLE_FLOAT_EQUALITY_WARNING _Pragma("GCC diagnostic pop")
#else
#define DISABLE_FLOAT_EQUALITY_WARNING
#define ENABLE_FLOAT_EQUALITY_WARNING
#endif
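// UNIFORM(X) draws one uniform value of type CT from the Philox generator,
// refilling the cached batch of Uniform::kResultElementCount results whenever
// it is exhausted.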
#define UNIFORM(X) \
if (uniform_remaining == 0) { \
uniform_remaining = Uniform::kResultElementCount; \
uniform_result = uniform(&gen); \
} \
uniform_remaining--; \
CT X = uniform_result[uniform_remaining]
namespace tensorflow {
namespace {
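// Upper bound on the number of Philox samples consumed per output element.
// The generator is skipped ahead by this amount for each element, so the
// results are deterministic and independent of how work is sharded.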
static constexpr int kReservedSamplesPerOutput = 256;
typedef Eigen::ThreadPoolDevice CPUDevice;
template <typename T>
struct PoissonComputeType {
typedef double ComputeType;
};
}
namespace functor {
template <typename T, typename U>
struct PoissonFunctor<CPUDevice, T, U> {
void operator()(OpKernelContext* ctx, const CPUDevice& d, const T* rate_flat,
int64_t num_rate, int64_t num_samples,
const random::PhiloxRandom& rng, U* samples_flat) {
typedef random::UniformDistribution<random::PhiloxRandom, CT> Uniform;
auto DoWork = [num_samples, num_rate, &rng, samples_flat, rate_flat](
int64_t start_output, int64_t limit_output) {
Uniform uniform;
typename Uniform::ResultType uniform_result;
// output_idx is advanced inside the per-sample loops below.
for (int64_t output_idx = start_output; output_idx < limit_output;) {
const int64_t rate_idx = output_idx / num_samples;
const CT rate = CT(rate_flat[rate_idx]);
auto samples_rate_output = samples_flat + rate_idx;
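// rate < 10: Knuth's multiplicative method -- keep multiplying uniform
// draws until the running product falls below exp(-rate); the number of
// factors needed, minus one, is the Poisson sample.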
if (rate < CT(10)) {
const CT exp_neg_rate = Eigen::numext::exp(-rate);
for (int64_t sample_idx = output_idx % num_samples;
sample_idx < num_samples && output_idx < limit_output;
sample_idx++, output_idx++) {
random::PhiloxRandom gen = rng;
gen.Skip(kReservedSamplesPerOutput * output_idx);
int16_t uniform_remaining = 0;
CT prod = 1;
CT x = 0;
while (true) {
UNIFORM(u);
prod = prod * u;
if (prod <= exp_neg_rate &&
x <= CT(Eigen::NumTraits<U>::highest())) {
samples_rate_output[sample_idx * num_rate] = U(x);
break;
}
x += 1;
}
}
continue;
}
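// rate == +infinity: every sample is +infinity.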
if (Eigen::numext::isinf(rate) && rate > CT(0)) {
for (int64_t sample_idx = output_idx % num_samples;
sample_idx < num_samples && output_idx < limit_output;
sample_idx++, output_idx++) {
U k = Eigen::NumTraits<U>::infinity();
samples_rate_output[sample_idx * num_rate] = k;
}
continue;
}
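// rate >= 10: transformed rejection with squeeze (PTRS, Hormann 1993);
// the constants below are the ones prescribed by that algorithm.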
using Eigen::numext::log;
const CT log_rate = log(rate);
const CT b = CT(0.931) + CT(2.53) * Eigen::numext::sqrt(rate);
const CT a = CT(-0.059) + CT(0.02483) * b;
const CT inv_alpha = CT(1.1239) + CT(1.1328) / (b - CT(3.4));
for (int64_t sample_idx = output_idx % num_samples;
sample_idx < num_samples && output_idx < limit_output;
sample_idx++, output_idx++) {
random::PhiloxRandom gen = rng;
gen.Skip(kReservedSamplesPerOutput * output_idx);
int16_t uniform_remaining = 0;
while (true) {
UNIFORM(u);
u -= CT(0.5);
UNIFORM(v);
CT u_shifted = CT(0.5) - Eigen::numext::abs(u);
CT k = Eigen::numext::floor((CT(2) * a / u_shifted + b) * u + rate +
CT(0.43));
if (k > CT(Eigen::NumTraits<U>::highest())) {
continue;
}
if (u_shifted >= CT(0.07) &&
v <= CT(0.9277) - CT(3.6224) / (b - CT(2))) {
samples_rate_output[sample_idx * num_rate] = U(k);
break;
}
if (k < 0 || (u_shifted < CT(0.013) && v > u_shifted)) {
continue;
}
CT s = log(v * inv_alpha / (a / (u_shifted * u_shifted) + b));
CT t = -rate + k * log_rate - Eigen::numext::lgamma(k + 1);
if (s <= t) {
samples_rate_output[sample_idx * num_rate] = U(k);
break;
}
}
}
}
};
static const int kElementCost = 165 + 6 * Uniform::kElementCost +
6 * random::PhiloxRandom::kElementCost;
auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
Shard(worker_threads.num_threads, worker_threads.workers,
num_rate * num_samples, kElementCost, DoWork);
}
private:
typedef typename PoissonComputeType<T>::ComputeType CT;
};
}
namespace {
template <typename T, typename U>
class RandomPoissonOp : public OpKernel {
public:
explicit RandomPoissonOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, generator_.Init(context));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& shape_t = ctx->input(0);
const Tensor& rate_t = ctx->input(1);
TensorShape samples_shape;
OP_REQUIRES_OK(ctx, tensor::MakeShape(shape_t, &samples_shape));
const int64_t num_samples = samples_shape.num_elements();
OP_REQUIRES_OK(ctx, samples_shape.AppendShapeWithStatus(rate_t.shape()));
Tensor* samples_t = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, samples_shape, &samples_t));
if (num_samples == 0) return;
const auto rate_flat = rate_t.flat<T>().data();
const int64_t num_rate = rate_t.NumElements();
auto samples_flat = samples_t->flat<U>().data();
random::PhiloxRandom rng = generator_.ReserveRandomOutputs(
num_samples * num_rate, kReservedSamplesPerOutput);
functor::PoissonFunctor<CPUDevice, T, U>()(
ctx, ctx->eigen_device<CPUDevice>(), rate_flat, num_rate, num_samples,
rng, samples_flat);
}
private:
GuardedPhiloxRandom generator_;
RandomPoissonOp(const RandomPoissonOp&) = delete;
void operator=(const RandomPoissonOp&) = delete;
};
}
#undef UNIFORM
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER( \
Name("RandomPoisson").Device(DEVICE_CPU).TypeConstraint<TYPE>("dtype"), \
RandomPoissonOp<TYPE, TYPE>);
TF_CALL_half(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
#define REGISTER_V2(RTYPE, OTYPE) \
template struct functor::PoissonFunctor<CPUDevice, RTYPE, OTYPE>; \
REGISTER_KERNEL_BUILDER(Name("RandomPoissonV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<RTYPE>("R") \
.TypeConstraint<OTYPE>("dtype"), \
RandomPoissonOp<RTYPE, OTYPE>);
#define REGISTER_ALL(RTYPE) \
REGISTER_V2(RTYPE, Eigen::half); \
REGISTER_V2(RTYPE, float); \
REGISTER_V2(RTYPE, double); \
REGISTER_V2(RTYPE, int32); \
REGISTER_V2(RTYPE, int64_t);
REGISTER_ALL(Eigen::half);
REGISTER_ALL(float);
REGISTER_ALL(double);
REGISTER_ALL(int32);
REGISTER_ALL(int64_t);
#undef REGISTER_ALL
#undef REGISTER_V2
#undef REGISTER
} | #include <random>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
Tensor VecShape(int64_t v) {
if (v >= std::numeric_limits<int32>::max()) {
Tensor shape(DT_INT64, TensorShape({1}));
shape.vec<int64_t>()(0) = v;
return shape;
} else {
Tensor shape(DT_INT32, TensorShape({1}));
shape.vec<int32>()(0) = v;
return shape;
}
}
Tensor VecLam32(int64_t n, int magnitude) {
std::mt19937 gen(0x12345);
std::uniform_real_distribution<float> dist(0.0, 1.0);
Tensor lams(DT_FLOAT, TensorShape({n}));
for (int i = 0; i < n; i++) {
lams.vec<float>()(i) = magnitude * (1 + dist(gen));
}
return lams;
}
Tensor VecLam64(int64_t n, int magnitude) {
std::mt19937 gen(0x12345);
std::uniform_real_distribution<double> dist(0.0, 1.0);
Tensor lams(DT_DOUBLE, TensorShape({n}));
for (int i = 0; i < n; i++) {
lams.vec<double>()(i) = magnitude * (1 + dist(gen));
}
return lams;
}
#define BM_Poisson(DEVICE, BITS, MAGNITUDE) \
static void BM_##DEVICE##_RandomPoisson_lam_##MAGNITUDE##_##BITS( \
::testing::benchmark::State& state) { \
const int nsamp = state.range(0); \
const int nlam = state.range(1); \
\
Graph* g = new Graph(OpRegistry::Global()); \
test::graph::RandomPoisson( \
g, test::graph::Constant(g, VecShape(nsamp)), \
test::graph::Constant(g, VecLam##BITS(nlam, MAGNITUDE))); \
test::Benchmark(#DEVICE, g, false).Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * nsamp * \
nlam); \
} \
BENCHMARK(BM_##DEVICE##_RandomPoisson_lam_##MAGNITUDE##_##BITS) \
->RangePair(1, 64, 2, 50);
BM_Poisson(cpu, 32, 1);
BM_Poisson(cpu, 32, 8);
BM_Poisson(cpu, 32, 32);
BM_Poisson(cpu, 64, 1);
BM_Poisson(cpu, 64, 8);
BM_Poisson(cpu, 64, 32);
}
} |
1,486 | cpp | tensorflow/tensorflow | composite_tensor_variant | tensorflow/core/kernels/composite_tensor_variant.cc | tensorflow/core/kernels/composite_tensor_variant_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_COMPOSITE_TENSOR_VARIANT_H_
#define TENSORFLOW_CORE_KERNELS_COMPOSITE_TENSOR_VARIANT_H_
#include <vector>
#include "absl/types/span.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
namespace tensorflow {
class CompositeTensorVariantMetadata;
class CompositeTensorVariant {
public:
CompositeTensorVariant(const CompositeTensorVariantMetadata& metadata,
absl::Span<Tensor> flat_components);
CompositeTensorVariant();
CompositeTensorVariant(const CompositeTensorVariant& other);
CompositeTensorVariant& operator=(CompositeTensorVariant&& other) = default;
CompositeTensorVariant& operator=(const CompositeTensorVariant& other) =
delete;
absl::Span<const Tensor> flat_components() const {
return absl::MakeConstSpan(flat_components_);
}
const CompositeTensorVariantMetadata& metadata() const { return *metadata_; }
string TypeName() const { return kTypeName; }
void Encode(VariantTensorData* data) const;
bool Decode(const VariantTensorData& data);
string DebugString() const;
static constexpr const char kTypeName[] = "CompositeTensorVariant";
private:
std::vector<Tensor> flat_components_;
std::unique_ptr<CompositeTensorVariantMetadata> metadata_;
};
}
#endif
#include "tensorflow/core/kernels/composite_tensor_variant.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/protobuf/composite_tensor_variant.pb.h"
#include "tensorflow/core/protobuf/struct.pb.h"
namespace tensorflow {
constexpr const char CompositeTensorVariant::kTypeName[];
CompositeTensorVariant::CompositeTensorVariant(
const CompositeTensorVariantMetadata& metadata,
absl::Span<Tensor> flat_components)
: flat_components_(flat_components.begin(), flat_components.end()),
metadata_(new CompositeTensorVariantMetadata()) {
*metadata_ = metadata;
}
CompositeTensorVariant::CompositeTensorVariant()
: metadata_(new CompositeTensorVariantMetadata()) {}
CompositeTensorVariant::CompositeTensorVariant(
const CompositeTensorVariant& other)
: flat_components_(other.flat_components_),
metadata_(new CompositeTensorVariantMetadata()) {
*metadata_ = *other.metadata_;
}
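// Encode writes the metadata proto into the variant's metadata string and
// appends each flat component tensor; Decode reverses the process.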
void CompositeTensorVariant::Encode(VariantTensorData* data) const {
data->set_type_name(TypeName());
metadata_->SerializeToString(&data->metadata_string());
for (const Tensor& tensor : flat_components_) {
data->add_tensor(tensor);
}
}
bool CompositeTensorVariant::Decode(const VariantTensorData& data) {
if (!metadata_->ParseFromString(data.metadata_string())) {
return false;
}
flat_components_ = data.tensors();
return true;
}
string CompositeTensorVariant::DebugString() const {
string result("<CompositeTensorVariant type=");
result.append(TypeSpecProto::TypeSpecClass_Name(
metadata_->type_spec_proto().type_spec_class()));
result.append(", components=[");
for (const auto& tensor : flat_components_) {
if (&tensor != &flat_components_[0]) {
result.append(", ");
}
result.append(tensor.DebugString());
}
result.append("]>");
return result;
}
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(CompositeTensorVariant,
CompositeTensorVariant::kTypeName);
} | #include "tensorflow/core/kernels/composite_tensor_variant.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/composite_tensor_variant.pb.h"
namespace tensorflow {
namespace {
constexpr const char* k2DRaggedTensorSpec = R"(
type_spec_proto: {
type_spec_class: RAGGED_TENSOR_SPEC
type_state: {
tuple_value: {
values: [
{tensor_shape_value:
{dim: [{size: -1}, {size: -1}]}}, # shape
{tensor_dtype_value: DT_INT32}, # dtype
{int64_value: 1}, # ragged_rank
{tensor_dtype_value: DT_INT64} # row_splits_dtype
]
}
}
})";
CompositeTensorVariant Make2DRaggedTensor(const std::vector<int32>& values,
const std::vector<int64_t>& splits) {
CompositeTensorVariantMetadata metadata;
EXPECT_TRUE(
protobuf::TextFormat::ParseFromString(k2DRaggedTensorSpec, &metadata));
std::vector<Tensor> components;
components.push_back(test::AsTensor<int32>(values));
components.push_back(test::AsTensor<int64_t>(splits));
CompositeTensorVariant v(metadata, absl::MakeSpan(components));
return v;
}
TEST(CompositeTensorVariantTest, EncodeAndDecodeRagged) {
CompositeTensorVariant v = Make2DRaggedTensor(
{5, 5, 3, 4, 1, 8},
{0, 2, 3, 6});
Tensor t(DT_VARIANT, {});
t.flat<Variant>()(0) = v;
auto* decoded = t.flat<Variant>()(0).get<CompositeTensorVariant>();
EXPECT_EQ(v.metadata().SerializeAsString(),
decoded->metadata().SerializeAsString());
EXPECT_EQ(v.flat_components().size(), 2);
test::ExpectTensorEqual<int32>(v.flat_components()[0],
decoded->flat_components()[0]);
test::ExpectTensorEqual<int64_t>(v.flat_components()[1],
decoded->flat_components()[1]);
}
TEST(CompositeTensorVariantTest, DebugStringForDefaultConstructed) {
CompositeTensorVariant v;
EXPECT_EQ(v.DebugString(),
"<CompositeTensorVariant type=UNKNOWN, components=[]>");
}
TEST(CompositeTensorVariantTest, DebugStringForRagged) {
CompositeTensorVariant v = Make2DRaggedTensor(
{5, 5, 3, 4, 1},
{0, 2, 3, 5});
EXPECT_EQ(v.DebugString(),
"<CompositeTensorVariant type=RAGGED_TENSOR_SPEC, "
"components=[Tensor<type: int32 shape: [5] values: 5 5 3...>, "
"Tensor<type: int64 shape: [4] values: 0 2 3...>]>");
}
TEST(CompositeTensorVariantTest, TypeName) {
CompositeTensorVariant v;
EXPECT_EQ(v.TypeName(), "CompositeTensorVariant");
}
}
} |
1,487 | cpp | tensorflow/tensorflow | checkpoint_callback_manager | tensorflow/core/kernels/checkpoint_callback_manager.cc | tensorflow/core/kernels/checkpoint_callback_manager_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_CHECKPOINT_CALLBACK_MANAGER_H_
#define TENSORFLOW_CORE_KERNELS_CHECKPOINT_CALLBACK_MANAGER_H_
#include <functional>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/resource_base.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace checkpoint {
ABSL_CONST_INIT extern const absl::string_view
kCheckpointCallbackManagerResourceName;
using SaveCallback =
std::function<absl::StatusOr<std::string>(absl::string_view)>;
using RestoreCallback =
std::function<Status(absl::string_view, absl::string_view)>;
class CheckpointCallbackManager : public ResourceBase {
public:
CheckpointCallbackManager() = default;
CheckpointCallbackManager(const CheckpointCallbackManager&) = delete;
CheckpointCallbackManager& operator=(const CheckpointCallbackManager&) =
delete;
std::string DebugString() const override {
return "CheckpointCallbackManager";
}
static absl::StatusOr<std::pair<std::string, std::string>>
GetCheckpointIdAndPathFromPrefix(absl::string_view prefix);
Status RegisterSaveCallback(absl::string_view file_extension,
SaveCallback callback);
bool DoesSaveCallbackExist(absl::string_view file_extension);
Status RegisterRestoreCallback(absl::string_view file_extension,
RestoreCallback callback);
bool DoesRestoreCallbackExist(absl::string_view file_extension);
void Save(absl::string_view prefix);
void Restore(absl::string_view prefix);
private:
mutable mutex mu_;
absl::flat_hash_map<std::string, SaveCallback> save_callbacks_
TF_GUARDED_BY(mu_);
absl::flat_hash_map<std::string, RestoreCallback> restore_callbacks_
TF_GUARDED_BY(mu_);
std::pair<std::string, std::string> last_restored_checkpoint_id_and_dir_
TF_GUARDED_BY(mu_);
std::pair<std::string, std::string> last_saved_checkpoint_id_and_dir_
TF_GUARDED_BY(mu_);
};
}
}
#endif
#include "tensorflow/core/kernels/checkpoint_callback_manager.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tsl/platform/regexp.h"
namespace tensorflow {
namespace checkpoint {
const absl::string_view kCheckpointCallbackManagerResourceName =
"checkpoint_callback_manager";
namespace {
const absl::string_view kCheckpointFileRegex = "^part-[0-9]*-of-[0-9]*";
const absl::string_view kCheckpointTempDirRegex = "-[0-9]*_temp$";
const absl::string_view kCheckpointDirRegex = "-[0-9]*$";
const absl::string_view kCheckpointTempDirSuffix = "_temp";
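// Unless <checkpoint_dir>/<checkpoint_id>.<file_extension> already exists,
// runs `callback` and writes any non-empty payload it returns to that file.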
void TriggerSaveCallbackIfFileNotExist(absl::string_view checkpoint_id,
absl::string_view checkpoint_dir,
absl::string_view file_extension,
SaveCallback callback) {
const std::string file_path = io::JoinPath(
checkpoint_dir, absl::StrCat(checkpoint_id, ".", file_extension));
if (Env::Default()->FileExists(file_path).ok()) {
return;
}
LOG(INFO) << "Calling a save callback: file_extension = " << file_extension
<< ", checkpoint_id = " << checkpoint_id;
absl::StatusOr<std::string> save_content = callback(checkpoint_id);
if (!save_content.ok()) {
LOG(WARNING) << save_content.status();
return;
}
if (save_content->empty()) {
return;
}
Status write_status =
WriteStringToFile(Env::Default(), file_path, *save_content);
if (!write_status.ok()) {
LOG(WARNING) << write_status;
} else {
LOG(INFO) << "A CheckpointCallbackManager has been written to "
<< file_path;
}
}
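// If <checkpoint_dir>/<checkpoint_id>.<file_extension> exists, reads it and
// passes its contents to `callback`.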
void TriggerRestoreCallbackIfFileExists(absl::string_view checkpoint_id,
absl::string_view checkpoint_dir,
absl::string_view file_extension,
RestoreCallback callback) {
const std::string file_path = io::JoinPath(
checkpoint_dir, absl::StrCat(checkpoint_id, ".", file_extension));
if (!Env::Default()->FileExists(file_path).ok()) {
return;
}
std::string payload;
Status read_status = ReadFileToString(Env::Default(), file_path, &payload);
if (!read_status.ok()) {
LOG(WARNING) << "Failed to read: " << read_status;
return;
}
LOG(INFO) << "Calling a restore callback: file_extension = " << file_extension
<< ", checkpoint_id = " << checkpoint_id;
Status callback_status = callback(checkpoint_id, payload);
if (!callback_status.ok()) {
LOG(WARNING) << callback_status;
}
}
}
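// Walks up `prefix` until a path component matches a checkpoint-id pattern,
// e.g. "/foo/bar/model.ckpt-5_temp/part-00000-of-00001" yields
// {"model.ckpt-5", "/foo/bar"}.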
absl::StatusOr<std::pair<std::string, std::string>>
CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix(
absl::string_view prefix) {
for (absl::string_view path = prefix;; path = io::Dirname(path)) {
absl::string_view basename = io::Basename(path);
if (basename.empty()) break;
if (RE2::PartialMatch(basename, kCheckpointFileRegex)) continue;
if (RE2::PartialMatch(basename, kCheckpointTempDirRegex)) {
return std::make_pair(
std::string(basename.substr(
0, basename.length() - kCheckpointTempDirSuffix.length())),
std::string(io::Dirname(path)));
}
if (RE2::PartialMatch(basename, kCheckpointDirRegex)) {
return std::make_pair(std::string(basename),
std::string(io::Dirname(path)));
}
}
return errors::NotFound(
absl::StrCat("Failed to find a checkpoint id. prefix = ", prefix));
}
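// Registers a save callback. If a checkpoint has already been saved in this
// session, the callback is triggered immediately for that checkpoint.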
Status CheckpointCallbackManager::RegisterSaveCallback(
absl::string_view file_extension, SaveCallback callback) {
SaveCallback lazy_callback = nullptr;
std::string checkpoint_id;
std::string checkpoint_dir;
{
mutex_lock l(mu_);
if (!save_callbacks_.try_emplace(file_extension, std::move(callback))
.second) {
return errors::AlreadyExists("A callback already exists.");
}
if (!last_saved_checkpoint_id_and_dir_.first.empty()) {
lazy_callback = save_callbacks_[file_extension];
checkpoint_id = last_saved_checkpoint_id_and_dir_.first;
checkpoint_dir = last_saved_checkpoint_id_and_dir_.second;
}
}
if (lazy_callback != nullptr) {
TriggerSaveCallbackIfFileNotExist(checkpoint_id, checkpoint_dir,
file_extension, lazy_callback);
}
return absl::OkStatus();
}
bool CheckpointCallbackManager::DoesSaveCallbackExist(
absl::string_view file_extension) {
tf_shared_lock l(mu_);
return save_callbacks_.contains(file_extension);
}
Status CheckpointCallbackManager::RegisterRestoreCallback(
absl::string_view file_extension, RestoreCallback callback) {
RestoreCallback lazy_callback = nullptr;
std::string checkpoint_id;
std::string checkpoint_dir;
{
mutex_lock l(mu_);
if (!restore_callbacks_.try_emplace(file_extension, std::move(callback))
.second) {
return errors::AlreadyExists("A callback already exists.");
}
if (!last_restored_checkpoint_id_and_dir_.first.empty()) {
lazy_callback = restore_callbacks_[file_extension];
checkpoint_id = last_restored_checkpoint_id_and_dir_.first;
checkpoint_dir = last_restored_checkpoint_id_and_dir_.second;
}
}
if (lazy_callback != nullptr) {
TriggerRestoreCallbackIfFileExists(checkpoint_id, checkpoint_dir,
file_extension, lazy_callback);
}
return absl::OkStatus();
}
bool CheckpointCallbackManager::DoesRestoreCallbackExist(
absl::string_view file_extension) {
tf_shared_lock l(mu_);
return restore_callbacks_.contains(file_extension);
}
void CheckpointCallbackManager::Save(absl::string_view prefix) {
absl::StatusOr<std::pair<std::string, std::string>> id_and_dir =
GetCheckpointIdAndPathFromPrefix(prefix);
if (!id_and_dir.ok()) {
return;
}
absl::flat_hash_map<std::string, SaveCallback> copy_of_save_callbacks;
{
mutex_lock l(mu_);
last_saved_checkpoint_id_and_dir_ = *id_and_dir;
copy_of_save_callbacks = save_callbacks_;
}
for (const auto& name_and_callback : copy_of_save_callbacks) {
TriggerSaveCallbackIfFileNotExist(id_and_dir->first, id_and_dir->second,
name_and_callback.first,
name_and_callback.second);
}
}
void CheckpointCallbackManager::Restore(absl::string_view prefix) {
absl::StatusOr<std::pair<std::string, std::string>> id_and_dir =
GetCheckpointIdAndPathFromPrefix(prefix);
if (!id_and_dir.ok()) {
return;
}
absl::flat_hash_map<std::string, RestoreCallback> copy_of_restore_callbacks;
{
mutex_lock l(mu_);
if (*id_and_dir == last_restored_checkpoint_id_and_dir_) {
return;
}
last_restored_checkpoint_id_and_dir_ = *id_and_dir;
copy_of_restore_callbacks = restore_callbacks_;
}
for (const auto& name_and_callback : copy_of_restore_callbacks) {
TriggerRestoreCallbackIfFileExists(id_and_dir->first, id_and_dir->second,
name_and_callback.first,
name_and_callback.second);
}
}
}
} | #include "tensorflow/core/kernels/checkpoint_callback_manager.h"
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace checkpoint {
namespace {
class CheckpointCallbackManagerTest : public ::testing::Test {
protected:
void SetUp() override {
checkpoint_callback_manager_ = new CheckpointCallbackManager();
handle_ = ResourceHandle::MakeRefCountingHandle(
checkpoint_callback_manager_, "cpu", {}, {});
}
CheckpointCallbackManager* checkpoint_callback_manager_;
ResourceHandle handle_;
};
TEST_F(CheckpointCallbackManagerTest,
GetCheckpointIdAndPathFromPrefixWithTempDir) {
absl::StatusOr<std::pair<std::string, std::string>> pair =
CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix(
"/foo/bar/model.ckpt-5_temp/part-00000-of-00001");
TF_ASSERT_OK(pair.status());
EXPECT_EQ(pair->first, "model.ckpt-5");
EXPECT_EQ(pair->second, "/foo/bar");
}
TEST_F(CheckpointCallbackManagerTest,
GetCheckpointIdAndPathFromPrefixWithPartFile) {
absl::StatusOr<std::pair<std::string, std::string>> pair =
CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix(
"/foo/bar/model.ckpt-5/part-00000-of-00001");
TF_ASSERT_OK(pair.status());
EXPECT_EQ(pair->first, "model.ckpt-5");
EXPECT_EQ(pair->second, "/foo/bar");
}
TEST_F(CheckpointCallbackManagerTest,
GetCheckpointIdAndPathFromPrefixWithoutPartFile) {
absl::StatusOr<std::pair<std::string, std::string>> pair =
CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix(
"/foo/bar/model.ckpt-5");
TF_ASSERT_OK(pair.status());
EXPECT_EQ(pair->first, "model.ckpt-5");
EXPECT_EQ(pair->second, "/foo/bar");
}
TEST_F(CheckpointCallbackManagerTest,
GetCheckpointIdAndPathFromPrefixForLongerPartName) {
absl::StatusOr<std::pair<std::string, std::string>> pair =
CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix(
"/foo/bar/ckpt-tensor-1_temp/part-00000-of-00002_dev-0-of-2");
TF_ASSERT_OK(pair.status());
EXPECT_EQ(pair->first, "ckpt-tensor-1");
EXPECT_EQ(pair->second, "/foo/bar");
}
TEST_F(CheckpointCallbackManagerTest,
GetCheckpointIdAndPathFromPrefixUnrecognized) {
EXPECT_FALSE(
CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix("/foo/bar")
.ok());
}
TEST_F(CheckpointCallbackManagerTest, RegisterSaveCallbackTwice) {
SaveCallback first_callback = [](absl::string_view checkpoint_id) {
return std::string("MockString");
};
SaveCallback second_callback = [](absl::string_view checkpoint_id) {
return std::string("MockString");
};
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback(
"foo", std::move(first_callback)));
EXPECT_FALSE(checkpoint_callback_manager_
->RegisterSaveCallback("foo", std::move(second_callback))
.ok());
}
TEST_F(CheckpointCallbackManagerTest, RegisterRestoreCallbackTwice) {
RestoreCallback first_callback = [](absl::string_view checkpoint_id,
absl::string_view str) {
return absl::OkStatus();
};
RestoreCallback second_callback = [](absl::string_view checkpoint_id,
absl::string_view str) {
return absl::OkStatus();
};
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterRestoreCallback(
"foo", std::move(first_callback)));
EXPECT_FALSE(checkpoint_callback_manager_
->RegisterRestoreCallback("foo", std::move(second_callback))
.ok());
}
TEST_F(CheckpointCallbackManagerTest, DoesSaveCallbackExist) {
SaveCallback first_callback = [](absl::string_view checkpoint_id) {
return std::string("MockString");
};
SaveCallback second_callback = [](absl::string_view checkpoint_id) {
return std::string("MockString");
};
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback(
"foo", std::move(first_callback)));
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback(
"bar", std::move(second_callback)));
EXPECT_TRUE(checkpoint_callback_manager_->DoesSaveCallbackExist("foo"));
EXPECT_TRUE(checkpoint_callback_manager_->DoesSaveCallbackExist("bar"));
EXPECT_FALSE(
checkpoint_callback_manager_->DoesSaveCallbackExist("not_exist"));
}
TEST_F(CheckpointCallbackManagerTest, DoesRestoreCallbackExist) {
RestoreCallback first_callback = [](absl::string_view checkpoint_id,
absl::string_view str) {
return absl::OkStatus();
};
RestoreCallback second_callback = [](absl::string_view checkpoint_id,
absl::string_view str) {
return absl::OkStatus();
};
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterRestoreCallback(
"foo", std::move(first_callback)));
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterRestoreCallback(
"bar", std::move(second_callback)));
EXPECT_TRUE(checkpoint_callback_manager_->DoesRestoreCallbackExist("foo"));
EXPECT_TRUE(checkpoint_callback_manager_->DoesRestoreCallbackExist("bar"));
EXPECT_FALSE(
checkpoint_callback_manager_->DoesRestoreCallbackExist("not_exist"));
}
TEST_F(CheckpointCallbackManagerTest, SaveTwoCallbacks) {
SaveCallback save_callback1 = [](absl::string_view checkpoint_id) {
return absl::StrCat("MockContent1::", checkpoint_id);
};
SaveCallback save_callback2 = [](absl::string_view checkpoint_id) {
return absl::StrCat("MockContent2::", checkpoint_id);
};
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback(
"foo", std::move(save_callback1)));
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback(
"bar", std::move(save_callback2)));
checkpoint_callback_manager_->Save(io::JoinPath(
testing::TmpDir(), "model.ckpt-123_temp/part-00000-of-00001"));
std::string file_content1;
TF_EXPECT_OK(ReadFileToString(
Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-123.foo"),
&file_content1));
EXPECT_EQ(file_content1, "MockContent1::model.ckpt-123");
std::string file_content2;
TF_EXPECT_OK(ReadFileToString(
Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-123.bar"),
&file_content2));
EXPECT_EQ(file_content2, "MockContent2::model.ckpt-123");
}
TEST_F(CheckpointCallbackManagerTest, SaveMultipleTimes) {
SaveCallback save_callback = [](absl::string_view checkpoint_id) {
return absl::StrCat("MockContent::", checkpoint_id);
};
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback(
"foo", std::move(save_callback)));
checkpoint_callback_manager_->Save(io::JoinPath(
testing::TmpDir(), "model.ckpt-100_temp/part-00000-of-00001"));
checkpoint_callback_manager_->Save(io::JoinPath(
testing::TmpDir(), "model.ckpt-100_temp/part-00000-of-00001"));
checkpoint_callback_manager_->Save(io::JoinPath(
testing::TmpDir(), "model.ckpt-200_temp/part-00000-of-00001"));
std::string file_content;
TF_EXPECT_OK(ReadFileToString(
Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-100.foo"),
&file_content));
EXPECT_EQ(file_content, "MockContent::model.ckpt-100");
TF_EXPECT_OK(ReadFileToString(
Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-200.foo"),
&file_content));
EXPECT_EQ(file_content, "MockContent::model.ckpt-200");
}
TEST_F(CheckpointCallbackManagerTest, Restore) {
int callback_call_count = 0;
RestoreCallback restore_callback = [&callback_call_count](
absl::string_view checkpoint_id,
absl::string_view str) {
EXPECT_EQ(checkpoint_id, "model.ckpt-100");
EXPECT_EQ(str, "Apple");
++callback_call_count;
return absl::OkStatus();
};
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterRestoreCallback(
"foo", std::move(restore_callback)));
TF_EXPECT_OK(WriteStringToFile(
Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-100.foo"),
"Apple"));
EXPECT_EQ(callback_call_count, 0);
checkpoint_callback_manager_->Restore(
io::JoinPath(testing::TmpDir(), "model.ckpt-100"));
EXPECT_EQ(callback_call_count, 1);
checkpoint_callback_manager_->Restore(
io::JoinPath(testing::TmpDir(), "model.ckpt-100"));
EXPECT_EQ(callback_call_count, 1);
}
TEST_F(CheckpointCallbackManagerTest, SaveAndRestore) {
SaveCallback save_callback = [](absl::string_view checkpoint_id) {
return std::string("Apple");
};
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback(
"foo", std::move(save_callback)));
int restore_callback_count = 0;
RestoreCallback restore_callback = [&restore_callback_count](
absl::string_view checkpoint_id,
absl::string_view str) {
EXPECT_EQ(checkpoint_id, "model.ckpt-500");
EXPECT_EQ(str, "Apple");
++restore_callback_count;
return absl::OkStatus();
};
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterRestoreCallback(
"foo", std::move(restore_callback)));
checkpoint_callback_manager_->Save(io::JoinPath(
testing::TmpDir(), "model.ckpt-500_temp/part-00000-of-00001"));
EXPECT_EQ(restore_callback_count, 0);
checkpoint_callback_manager_->Restore(
io::JoinPath(testing::TmpDir(), "model.ckpt-500"));
EXPECT_EQ(restore_callback_count, 1);
}
TEST_F(CheckpointCallbackManagerTest, SaveLazyCallback) {
SaveCallback save_callback = [](absl::string_view checkpoint_id) {
return absl::StrCat("MockContent::", checkpoint_id);
};
checkpoint_callback_manager_->Save(io::JoinPath(
testing::TmpDir(), "model.ckpt-456_temp/part-00000-of-00001"));
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback(
"foo", std::move(save_callback)));
std::string file_content;
TF_EXPECT_OK(ReadFileToString(
Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-456.foo"),
&file_content));
EXPECT_EQ(file_content, "MockContent::model.ckpt-456");
}
TEST_F(CheckpointCallbackManagerTest, RestoreLazyCallback) {
int callback_call_count = 0;
RestoreCallback restore_callback = [&callback_call_count](
absl::string_view checkpoint_id,
absl::string_view str) {
EXPECT_EQ(checkpoint_id, "model.ckpt-100");
EXPECT_EQ(str, "Apple");
++callback_call_count;
return absl::OkStatus();
};
TF_EXPECT_OK(WriteStringToFile(
Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-100.foo"),
"Apple"));
EXPECT_EQ(callback_call_count, 0);
checkpoint_callback_manager_->Restore(
io::JoinPath(testing::TmpDir(), "model.ckpt-100"));
EXPECT_EQ(callback_call_count, 0);
TF_ASSERT_OK(checkpoint_callback_manager_->RegisterRestoreCallback(
"foo", std::move(restore_callback)));
EXPECT_EQ(callback_call_count, 1);
}
}
}
} |
1,488 | cpp | tensorflow/tensorflow | deep_conv2d | tensorflow/core/kernels/deep_conv2d.cc | tensorflow/core/kernels/deep_conv2d_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DEEP_CONV2D_H_
#define TENSORFLOW_CORE_KERNELS_DEEP_CONV2D_H_
#include "tensorflow/core/framework/types.h"
namespace tensorflow {
class OpKernelContext;
template <typename T>
class DeepConv2DTransform {
public:
virtual ~DeepConv2DTransform() {}
virtual void GetFilterTransformMatrix(const int64_t rows, const int64_t cols,
T* transform_matrix) const = 0;
virtual void GetInputTransformMatrix(const int64_t rows, const int64_t cols,
T* transform_matrix) const = 0;
virtual void GetOutputTransformMatrix(const int64_t rows, const int64_t cols,
T* transform_matrix) const = 0;
struct Shape {
Shape(int64_t r, int64_t c) : rows(r), cols(c) {}
int64_t rows;
int64_t cols;
};
virtual const Shape& filter_shape() const = 0;
virtual const Shape& input_shape() const = 0;
virtual const Shape& output_shape() const = 0;
};
struct Conv2DArgs {
int batch;
int in_rows;
int in_cols;
int in_depth;
int filter_rows;
int filter_cols;
int pad_rows;
int pad_cols;
int out_rows;
int out_cols;
int out_depth;
Conv2DArgs()
: batch(0),
in_rows(0),
in_cols(0),
in_depth(0),
filter_rows(0),
filter_cols(0),
pad_rows(0),
pad_cols(0),
out_rows(0),
out_cols(0),
out_depth(0) {}
};
bool CanUseDeepConv2D(int stride_rows, int stride_cols, int filter_rows,
int filter_cols, int in_depth, int out_depth,
int out_rows, int out_cols);
namespace functor {
template <typename Device, typename T>
struct DeepConv2D {
void operator()(OpKernelContext* ctx, const Conv2DArgs& args, const T* input,
const T* filter, T* output);
};
}
}
#endif
#define USE_EIGEN_TENSOR
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/deep_conv2d.h"
#include <stdlib.h>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/winograd_transform.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
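// Estimated cost of the Winograd-style deep convolution: per output tile, an
// input transform, an element-wise product over depth in the transform
// domain, and an output transform.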
static int64_t GetDeepConvCost(int input_tile_rows, int input_tile_cols,
int out_tile_rows, int out_tile_cols,
int in_depth, int out_depth, int out_rows,
int out_cols) {
const int64_t input_tile_spatial_size = input_tile_rows * input_tile_cols;
const int64_t input_transform_cost =
input_tile_spatial_size * input_tile_spatial_size * in_depth;
const int64_t product_cost = input_tile_spatial_size * in_depth * out_depth;
const int64_t output_tile_spatial_size = out_tile_rows * out_tile_cols;
const int64_t output_transform_cost =
output_tile_spatial_size * input_tile_spatial_size * out_depth;
const int64_t row_tiles = (out_rows + out_tile_rows - 1) / out_tile_rows;
const int64_t col_tiles = (out_cols + out_tile_cols - 1) / out_tile_cols;
const int64_t num_tiles = row_tiles * col_tiles;
return num_tiles *
(input_transform_cost + product_cost + output_transform_cost);
}
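// Estimated cost of direct convolution: one multiply-add per filter tap per
// output element.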
static int64_t GetDirectConvCost(int filter_rows, int filter_cols, int in_depth,
int out_depth, int out_rows, int out_cols) {
return filter_rows * filter_cols * in_depth * out_depth * out_rows * out_cols;
}
static bool ReadBoolFromEnvVar(const char* env_var_name, bool default_val) {
const char* tf_env_var_val = getenv(env_var_name);
if (tf_env_var_val != nullptr) {
StringPiece tf_env_var_val_str(tf_env_var_val);
if (tf_env_var_val_str == "0") {
return false;
}
return true;
}
return default_val;
}
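// The deep (Winograd) path currently handles only stride-1 3x3 filters, is
// opt-in via the TF_USE_DEEP_CONV2D environment variable, and is chosen only
// when its estimated cost beats the direct convolution.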
bool CanUseDeepConv2D(int stride_rows, int stride_cols, int filter_rows,
int filter_cols, int in_depth, int out_depth,
int out_rows, int out_cols) {
if (stride_rows > 1 || stride_cols > 1 || filter_rows != 3 ||
filter_cols != 3) {
return false;
}
if (!ReadBoolFromEnvVar("TF_USE_DEEP_CONV2D", false)) {
return false;
}
WinogradTransform<float> t;
const int64_t deep_conv_cost = GetDeepConvCost(
t.input_shape().rows, t.input_shape().cols, t.output_shape().rows,
t.output_shape().cols, in_depth, out_depth, out_rows, out_cols);
const int64_t direct_conv_cost = GetDirectConvCost(
filter_rows, filter_cols, in_depth, out_depth, out_rows, out_cols);
VLOG(2) << "CanUseDeepConv2D"
<< " deep_conv_cost: " << deep_conv_cost
<< " direct_conv_cost: " << direct_conv_cost << " deep_direct_ratio: "
<< (static_cast<float>(deep_conv_cost) /
static_cast<float>(direct_conv_cost))
<< " use_deep_conv: " << (deep_conv_cost < direct_conv_cost);
return deep_conv_cost < direct_conv_cost;
}
typedef Eigen::ThreadPoolDevice CPUDevice;
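// Gathers one output filter's in_depth values (strided by out_depth in the
// source filter layout) into a contiguous buffer, using packet gathers for
// the vectorizable portion.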
template <typename T>
struct CopyFilterDepth {
void operator()(const Conv2DArgs& args, const T* filter_in, T* filter_buf) {
typedef typename Eigen::internal::packet_traits<T>::type Packet;
static constexpr int64_t kPacketSize = (sizeof(Packet) / sizeof(T));
const int64_t vectorized_size = args.in_depth / kPacketSize;
const int64_t scalar_size = args.in_depth % kPacketSize;
const int64_t input_stride = args.out_depth * kPacketSize;
for (int64_t d = 0; d < vectorized_size; ++d) {
auto v = Eigen::internal::pgather<T, Packet>(filter_in + d * input_stride,
args.out_depth);
Eigen::internal::pstoreu<T>(filter_buf + d * kPacketSize, v);
}
const int64_t in_scalar_base = vectorized_size * input_stride;
const int64_t buf_scalar_base = vectorized_size * kPacketSize;
for (int64_t d = 0; d < scalar_size; ++d) {
filter_buf[buf_scalar_base + d] =
filter_in[in_scalar_base + d * args.out_depth];
}
}
};
template <typename T>
struct ComputeFilterRangeTransform {
typedef typename Eigen::internal::packet_traits<T>::type Packet;
static constexpr int64_t kPacketSize = (sizeof(Packet) / sizeof(T));
typedef Eigen::Map<
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
MatrixMap;
typedef Eigen::Map<
const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
ConstMatrixMap;
void operator()(const Conv2DArgs& args,
const DeepConv2DTransform<T>* transform,
const int64_t od_start, const int64_t num_filters,
const int64_t shard_rows, const int64_t shard_cols,
const T* filter_in, const int64_t in_stride,
const int64_t out_stride, const T* transform_matrix,
T* out_buffer, T* filter_out) {
namespace ei = Eigen::internal;
const int64_t in_depth = args.in_depth;
const int64_t base_filter_rows = transform->filter_shape().rows;
const int64_t base_filter_cols = transform->filter_shape().cols;
const int64_t base_filter_spatial_size =
base_filter_rows * base_filter_cols;
const int64_t tile_rows = transform->input_shape().rows;
const int64_t tile_cols = transform->input_shape().cols;
const int64_t tile_spatial_size = tile_rows * tile_cols;
ConstMatrixMap A(transform_matrix, tile_spatial_size,
base_filter_spatial_size);
ConstMatrixMap B(filter_in, base_filter_spatial_size, in_stride);
MatrixMap C(out_buffer, tile_spatial_size, in_stride);
C.noalias() = A * B;
const int64_t scalar_size = in_depth % kPacketSize;
const int64_t vectorized_size = in_depth / kPacketSize;
const int64_t shard_stride = args.in_depth;
const int64_t out_depth_stride = shard_rows * shard_cols * shard_stride;
for (int64_t od = 0; od < num_filters; ++od) {
const int64_t out_depth_buf_base = od * out_depth_stride;
const int64_t out_depth_base = (od_start + od) * out_depth_stride;
for (int64_t s_r = 0; s_r < shard_rows; ++s_r) {
for (int64_t s_c = 0; s_c < shard_cols; ++s_c) {
const int64_t shard_base = shard_stride * (s_r * shard_cols + s_c);
for (int64_t i = 0; i < tile_spatial_size; ++i) {
const int64_t in_base =
i * in_stride + out_depth_buf_base + shard_base;
const int64_t out_base =
i * out_stride + out_depth_base + shard_base;
for (int64_t d = 0; d < vectorized_size; ++d) {
auto v =
ei::ploadu<Packet>(out_buffer + in_base + d * kPacketSize);
ei::pstoreu<T>(filter_out + out_base + d * kPacketSize, v);
}
const int64_t scalar_base = vectorized_size * kPacketSize;
for (int64_t d = 0; d < scalar_size; ++d) {
filter_out[out_base + scalar_base + d] =
out_buffer[in_base + scalar_base + d];
}
}
}
}
}
}
};
template <typename T>
struct TransformFilterRange {
void operator()(const Conv2DArgs& args,
const DeepConv2DTransform<T>* transform,
const int64_t od_start, const int64_t od_limit,
const T* filter_in, const T* transform_matrix, T* out_buffer,
T* filter_buf, T* filter_out) {
const int64_t num_filters = od_limit - od_start;
const int64_t base_filter_rows = transform->filter_shape().rows;
const int64_t base_filter_cols = transform->filter_shape().cols;
const int64_t base_filter_spatial_size =
base_filter_rows * base_filter_cols;
const int64_t residual_row =
std::max(int64_t{0}, args.filter_rows - base_filter_rows);
const int64_t shard_rows = 1 + (residual_row + 2 - 1) / 2;
const int64_t residual_col =
std::max(int64_t{0}, args.filter_cols - base_filter_cols);
const int64_t shard_cols = 1 + (residual_col + 2 - 1) / 2;
const int64_t shard_stride = args.in_depth;
const int64_t out_depth_stride = shard_rows * shard_cols * shard_stride;
const int64_t coord_stride = out_depth_stride * args.out_depth;
const int64_t filter_buf_stride =
num_filters * shard_rows * shard_cols * args.in_depth;
const int64_t tile_stride_rows = transform->output_shape().rows;
const int64_t tile_stride_cols = transform->output_shape().cols;
const int64_t filter_buf_size = base_filter_spatial_size * num_filters *
shard_rows * shard_cols * args.in_depth;
memset(filter_buf, 0, sizeof(T) * filter_buf_size);
for (int64_t od = 0; od < num_filters; ++od) {
const int64_t out_depth_base = od * out_depth_stride;
for (int64_t s_r = 0; s_r < shard_rows; ++s_r) {
const int64_t row_offset = s_r == 0 ? 0 : 1;
for (int64_t s_c = 0; s_c < shard_cols; ++s_c) {
const int64_t col_offset = s_c == 0 ? 0 : 1;
const int64_t f_r_start = s_r * tile_stride_rows;
const int64_t f_c_start = s_c * tile_stride_cols;
const int64_t shard_base = shard_stride * (s_r * shard_cols + s_c);
for (int64_t b_r = row_offset; b_r < base_filter_rows; ++b_r) {
const int64_t f_r = f_r_start + b_r;
if (f_r >= args.filter_rows) continue;
for (int64_t b_c = col_offset; b_c < base_filter_cols; ++b_c) {
const int64_t f_c = f_c_start + b_c;
if (f_c >= args.filter_cols) continue;
const int64_t in_index =
args.out_depth *
(args.in_depth * (f_r * args.filter_cols + f_c)) +
(od_start + od);
const int64_t buf_index =
filter_buf_stride * (b_r * base_filter_cols + b_c) +
out_depth_base + shard_base;
CopyFilterDepth<T>()(args, filter_in + in_index,
filter_buf + buf_index);
}
}
}
}
}
ComputeFilterRangeTransform<T>()(args, transform, od_start, num_filters,
shard_rows, shard_cols, filter_buf,
filter_buf_stride, coord_stride,
transform_matrix, out_buffer, filter_out);
}
};
template <typename T>
struct TransformFilters {
void operator()(OpKernelContext* ctx, const Conv2DArgs& args,
const DeepConv2DTransform<T>* transform,
const int64_t filter_shards_row,
const int64_t filter_shards_col, const T* filter_in,
T* filter_out) {
const int64_t in_depth = args.in_depth;
const int64_t out_depth = args.out_depth;
const int64_t tile_rows = transform->input_shape().rows;
const int64_t tile_cols = transform->input_shape().cols;
const int64_t tile_spatial_size = tile_rows * tile_cols;
const int64_t base_filter_rows = transform->filter_shape().rows;
const int64_t base_filter_cols = transform->filter_shape().cols;
const int64_t base_filter_spatial_size =
base_filter_rows * base_filter_cols;
const int64_t filter_shards_total = filter_shards_row * filter_shards_col;
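// Rough per-pass working-set budget (~256 KiB expressed in elements of T),
// used below to choose how many output filters to transform at a time.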
const int64_t cache_size = (256LL << 10) / sizeof(T);
const int64_t filter_transform_matrix_size =
tile_spatial_size * base_filter_spatial_size;
const int64_t filter_total_size =
base_filter_spatial_size * in_depth * filter_shards_total;
const int64_t filter_transform_buffer_size =
base_filter_spatial_size * filter_shards_total * in_depth;
const int64_t filter_out_buf_size =
tile_spatial_size * filter_shards_total * in_depth;
const int64_t per_filter_cost =
filter_total_size + filter_transform_buffer_size + filter_out_buf_size;
const int64_t num_filters_cache =
std::max(int64_t{1},
(cache_size - filter_transform_matrix_size) / per_filter_cost);
const int64_t num_filters_transform =
std::min(out_depth, num_filters_cache);
Tensor filter_transform_matrix;
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(
DataTypeToEnum<T>::value,
TensorShape({tile_spatial_size, base_filter_spatial_size}),
&filter_transform_matrix));
T* transform_matrix = filter_transform_matrix.template flat<T>().data();
transform->GetFilterTransformMatrix(
tile_spatial_size, base_filter_spatial_size, transform_matrix);
auto shard = [&ctx, &args, &transform, &base_filter_rows, &base_filter_cols,
&num_filters_transform, &in_depth, &filter_shards_row,
&filter_shards_col, &tile_spatial_size, &filter_in,
&transform_matrix,
&filter_out](int64_t start, int64_t limit) {
Tensor filter_transform_buffer;
OP_REQUIRES_OK(ctx,
ctx->allocate_temp(
DataTypeToEnum<T>::value,
TensorShape({base_filter_rows, base_filter_cols,
num_filters_transform, filter_shards_row,
filter_shards_col, in_depth}),
&filter_transform_buffer));
T* filter_buf = filter_transform_buffer.template flat<T>().data();
Tensor filter_output_buffer;
OP_REQUIRES_OK(
ctx,
ctx->allocate_temp(
DataTypeToEnum<T>::value,
TensorShape({tile_spatial_size, num_filters_transform,
filter_shards_row, filter_shards_col, in_depth}),
&filter_output_buffer));
T* out_buffer = filter_output_buffer.template flat<T>().data();
const int64_t num_filters = limit - start;
const int64_t od_unroll = num_filters_transform;
const int64_t od_unroll_limit = (num_filters / od_unroll) * od_unroll;
for (int64_t od = start; od < od_unroll_limit; od += od_unroll) {
TransformFilterRange<T>()(args, transform, od, od + od_unroll,
filter_in, transform_matrix, out_buffer,
filter_buf, filter_out);
}
if (od_unroll_limit < limit) {
TransformFilterRange<T>()(args, transform, od_unroll_limit, limit,
filter_in, transform_matrix, out_buffer,
filter_buf, filter_out);
}
};
auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
const int64_t shard_cost = args.filter_rows * args.filter_cols * in_depth *
filter_shards_total * tile_spatial_size;
Shard(1, worker_threads.workers, out_depth, shard_cost, shard);
}
};
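// Packs transformed filters into Eigen's gebp LHS block layout so that each
// tile coefficient's depth contraction can run through Eigen's GEMM kernel.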
template <typename T>
class GemmFilterPacker {
public:
typedef Eigen::internal::const_blas_data_mapper<T, int64_t, Eigen::RowMajor>
LhsMapper;
typedef Eigen::internal::gebp_traits<T, T> Traits;
Eigen::internal::gemm_pack_lhs<
T, int64_t, LhsMapper, Traits::mr, Traits::LhsProgress,
typename Traits::LhsPacket4Packing, Eigen::RowMajor>
pack_lhs;
GemmFilterPacker(const int64_t rows, const int64_t depth, const T* lhs_input,
T* lhs_block)
: rows_(rows),
depth_(depth),
lhs_block_(lhs_block),
lhs_mapper_(lhs_input, depth_) {}
void Run() { pack_lhs(lhs_block_, lhs_mapper_, depth_, rows_); }
private:
const int64_t rows_;
const int64_t depth_;
T* lhs_block_;
LhsMapper lhs_mapper_;
};
template <typename T>
struct PackFilters {
void operator()(OpKernelContext* ctx, const Conv2DArgs& args,
const int64_t tile_spatial_size,
const int64_t filter_shards_row,
const int64_t filter_shards_col,
const T* filter_transform_data,
std::vector<Tensor>* packed_filters) {
const int64_t in_depth = args.in_depth;
const int64_t out_depth = args.out_depth;
const int64_t num_filters =
filter_shards_row * filter_shards_col * out_depth;
auto shard = [&ctx, &packed_filters, &filter_transform_data, &in_depth,
&out_depth, &filter_shards_row, &filter_shards_col,
&num_filters](int64_t start, int64_t limit) {
const int64_t filter_coord_stride = num_filters * in_depth;
for (int64_t i = start; i < limit; ++i) {
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
TensorShape({out_depth, filter_shards_row,
filter_shards_col, in_depth}),
&(*packed_filters)[i]));
T* packed_filter = (*packed_filters)[i].template flat<T>().data();
GemmFilterPacker<T> packer(
num_filters, in_depth,
filter_transform_data + i * filter_coord_stride, packed_filter);
packer.Run();
}
};
auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
Shard(worker_threads.num_threads, worker_threads.workers, tile_spatial_size,
num_filters * in_depth, shard);
}
};
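// Wraps Eigen's gebp kernel: packs the RHS (transformed input tiles) and
// multiplies it against an already-packed LHS filter block into a
// zero-initialized output buffer.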
template <typename T>
class GemmState {
public:
typedef Eigen::internal::const_blas_data_mapper<T, int64_t, Eigen::ColMajor>
RhsMapper;
typedef Eigen::internal::blas_data_mapper<T, int64_t, Eigen::ColMajor>
OutputMapper;
typedef Eigen::internal::gebp_traits<T, T> Traits;
Eigen::internal::gemm_pack_rhs<T, int64_t, RhsMapper, Traits::nr,
Eigen::ColMajor>
pack_rhs;
Eigen::internal::gebp_kernel<T, T, int64_t, OutputMapper, Traits::mr,
Traits::nr, false, false>
gebp;
GemmState(const int64_t rows, const int64_t cols, const int64_t depth,
const int64_t out_buffer_size, const T* lhs_block,
const T* rhs_input, T* rhs_block, T* out_buffer)
: rows_(rows),
cols_(cols),
depth_(depth),
out_buffer_size_(out_buffer_size),
lhs_block_(lhs_block),
rhs_block_(rhs_block),
out_buffer_(out_buffer),
rhs_mapper_(rhs_input, depth_),
out_mapper_(out_buffer, rows_) {}
void PackRhs() { pack_rhs(rhs_block_, rhs_mapper_, depth_, cols_); }
void Compute() {
memset(out_buffer_, 0, sizeof(T) * out_buffer_size_);
gebp(out_mapper_, lhs_block_, rhs_block_, rows_, depth_, cols_, 1.0);
}
private:
const int64_t rows_;
const int64_t cols_;
const int64_t depth_;
const int64_t out_buffer_size_;
const T* lhs_block_;
T* rhs_block_;
T* out_buffer_;
RhsMapper rhs_mapper_;
OutputMapper out_mapper_;
};
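// Copies one (possibly padded) input tile into the tile buffer; rows and
// columns that fall outside the input are skipped, and the caller pre-zeroes
// the buffer.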
template <typename T>
struct CopyInputTile {
void operator()(const Conv2DArgs& args,
const DeepConv2DTransform<T>* transform,
const int64_t num_tiles, const int64_t in_r_start,
const int64_t in_c_start, const T* input, T* tile_buffer) {
typedef typename Eigen::internal::packet_traits<T>::type Packet;
static const int64_t kPacketSize = (sizeof(Packet) / sizeof(T));
const int64_t tile_rows = transform->input_shape().rows;
const int64_t tile_cols = transform->input_shape().cols;
const int64_t coord_stride = num_tiles * args.in_depth;
const int64_t input_vectorized_size =
(args.in_depth / kPacketSize) * kPacketSize;
const int64_t input_scalar_size = args.in_depth % kPacketSize;
for (int64_t r = 0; r < tile_rows; ++r) {
const int64_t in_r = in_r_start + r;
if (in_r < 0 || in_r >= args.in_rows) continue;
for (int64_t c = 0; c < tile_cols; ++c) {
const int64_t in_c = in_c_start + c;
if (in_c < 0 || in_c >= args.in_cols) continue;
auto* in = input + (in_r * args.in_cols + in_c) * args.in_depth;
auto* tile = tile_buffer + coord_stride * (r * tile_rows + c);
for (int64_t d = 0; d < input_vectorized_size; d += kPacketSize) {
auto v = Eigen::internal::ploadu<Packet>(in + d);
Eigen::internal::pstoreu<T>(tile, v);
tile += kPacketSize;
}
for (int64_t d = 0; d < input_scalar_size; ++d) {
tile[d] = in[input_vectorized_size + d];
}
}
}
}
};
template <typename T>
struct TransformInputTiles {
typedef Eigen::Map<
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
MatrixMap;
typedef Eigen::Map<
const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
ConstMatrixMap;
void operator()(const Conv2DArgs& args,
const DeepConv2DTransform<T>* transform,
const int64_t num_tiles, const int64_t in_r_start,
const int64_t in_c_start, const T* input,
const T* transform_matrix, T* tile_buffer,
T* tile_transform) {
const int64_t tile_rows = transform->input_shape().rows;
const int64_t tile_cols = transform->input_shape().cols;
const int64_t tile_spatial_size = tile_rows * tile_cols;
const int64_t tile_stride_cols = transform->output_shape().cols;
const int64_t coord_stride = num_tiles * args.in_depth;
const int64_t num_tiles_stride = args.in_depth;
memset(tile_buffer, 0, sizeof(T) * tile_spatial_size * coord_stride);
const int64_t in_r = in_r_start;
for (int64_t t = 0; t < num_tiles; ++t) {
const int64_t num_tiles_base = t * num_tiles_stride;
const int64_t in_c = in_c_start + t * tile_stride_cols;
CopyInputTile<T>()(args, transform, num_tiles, in_r, in_c, input, | #include "tensorflow/core/kernels/winograd_transform.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
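// Computes the Kronecker product of `matrix` (rows x cols) with itself,
// writing the (rows*rows) x (cols*cols) result in row-major order.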
static void ComputeKroneckerProduct(const int rows, const int cols,
const float* matrix, float* matrix_out) {
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
const float v = matrix[i * cols + j];
const int output_index_base = cols * (i * rows * cols + j);
for (int k = 0; k < rows; ++k) {
for (int l = 0; l < cols; ++l) {
const int input_index = k * cols + l;
const int output_index = k * cols * cols + l;
matrix_out[output_index_base + output_index] =
matrix[input_index] * v;
}
}
}
}
}
TEST(DeepConv2DTransformTest, Basic) {
const int rows = 2;
const int cols = 2;
float transform_matrix[] = {1, 2, 3, 4};
const int kron_rows = rows * rows;
const int kron_cols = cols * cols;
float transform_matrix_kron[kron_rows * kron_cols];
ComputeKroneckerProduct(rows, cols, &transform_matrix[0],
&transform_matrix_kron[0]);
float transform_matrix_test[] = {1, 2, 2, 4, 3, 4, 6, 8,
3, 6, 4, 8, 9, 12, 12, 16};
for (int i = 0; i < kron_rows * kron_cols; ++i) {
EXPECT_FLOAT_EQ(transform_matrix_kron[i], transform_matrix_test[i]);
}
}
TEST(DeepConv2DTransformTest, WingradFilterTransformMatrix) {
const int rows = 4;
const int cols = 3;
float transform_matrix[] = {1, 0, 0, 0.5, 0.5, 0.5, 0.5, -0.5, 0.5, 0, 0, 1};
const int kron_rows = rows * rows;
const int kron_cols = cols * cols;
float transform_matrix_kron[kron_rows * kron_cols];
ComputeKroneckerProduct(rows, cols, &transform_matrix[0],
&transform_matrix_kron[0]);
float transform_matrix_test[kron_rows * kron_cols];
WinogradTransform<float> t;
t.GetFilterTransformMatrix(kron_rows, kron_cols, &transform_matrix_test[0]);
for (int i = 0; i < kron_rows * kron_cols; ++i) {
EXPECT_FLOAT_EQ(transform_matrix_kron[i], transform_matrix_test[i]);
}
}
TEST(DeepConv2DTransformTest, WinogradInputTransformMatrix) {
const int rows = 4;
const int cols = 4;
float transform_matrix[] = {1, 0, -1, 0, 0, 1, 1, 0,
0, -1, 1, 0, 0, 1, 0, -1};
const int kron_rows = rows * rows;
const int kron_cols = cols * cols;
float transform_matrix_kron[kron_rows * kron_cols];
ComputeKroneckerProduct(rows, cols, &transform_matrix[0],
&transform_matrix_kron[0]);
float transform_matrix_test[kron_rows * kron_cols];
WinogradTransform<float> t;
t.GetInputTransformMatrix(kron_rows, kron_cols, &transform_matrix_test[0]);
for (int i = 0; i < kron_rows * kron_cols; ++i) {
EXPECT_FLOAT_EQ(transform_matrix_kron[i], transform_matrix_test[i]);
}
}
TEST(DeepConv2DTransformTest, WinogradOutputTransformMatrix) {
const int rows = 2;
const int cols = 4;
float transform_matrix[] = {1, 1, 1, 0, 0, 1, -1, -1};
const int kron_rows = rows * rows;
const int kron_cols = cols * cols;
float transform_matrix_kron[kron_rows * kron_cols];
ComputeKroneckerProduct(rows, cols, &transform_matrix[0],
&transform_matrix_kron[0]);
float transform_matrix_test[kron_rows * kron_cols];
WinogradTransform<float> t;
t.GetOutputTransformMatrix(kron_rows, kron_cols, &transform_matrix_test[0]);
for (int i = 0; i < kron_rows * kron_cols; ++i) {
EXPECT_FLOAT_EQ(transform_matrix_kron[i], transform_matrix_test[i]);
}
}
}
} |
1,489 | cpp | tensorflow/tensorflow | batch_kernels | tensorflow/core/kernels/batch_kernels.cc | tensorflow/core/kernels/batch_kernels_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCH_KERNELS_H_
#define TENSORFLOW_CORE_KERNELS_BATCH_KERNELS_H_
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/types.h"
namespace tensorflow {
ABSL_CONST_INIT extern const int64_t kMinInflightBatches;
ABSL_CONST_INIT extern const int64_t kInitialInflightBatches;
ABSL_CONST_INIT extern const int64_t kBatchesToAverageOver;
ABSL_CONST_INIT extern const int64_t kMaxInflightBatches;
namespace test_util {
class BatchFunctionKernelTestAccess;
}
void RecordBatchSplitUsage(
std::optional<bool> maybe_enable_large_batch_splitting,
absl::string_view model_name);
void RecordBatchParamNumBatchThreads(int64_t num_batch_threads,
absl::string_view model_name);
absl::string_view GetModelName(OpKernelContext* ctx);
class BatchFunctionKernel : public AsyncOpKernel {
public:
explicit BatchFunctionKernel(OpKernelConstruction* c);
bool IsExpensive() override;
void ComputeAsync(OpKernelContext* c, DoneCallback done) final;
private:
friend class test_util::BatchFunctionKernelTestAccess;
Status ValidateAllowedBatchSizes() const;
Status GetOrCreateFunctionHandle(OpKernelContext* c,
FunctionLibraryRuntime::Handle* handle);
Status InstantiateFunction(OpKernelContext* c,
FunctionLibraryRuntime::Handle* handle) const;
void SetAdaptiveBatchSchedulerOptions(OpKernelConstruction* c,
int32_t num_batch_threads);
string container_;
string shared_name_;
string batcher_queue_;
int32 num_batch_threads_;
int32 max_batch_size_;
int32 batch_timeout_micros_;
int32 max_enqueued_batches_;
std::vector<int32> allowed_batch_sizes_;
int32 low_priority_max_batch_size_;
int32 low_priority_batch_timeout_micros_;
int32 low_priority_max_enqueued_batches_;
std::vector<int32> low_priority_allowed_batch_sizes_;
std::string mixed_priority_policy_;
std::string batch_padding_policy_;
NameAttrList func_;
absl::optional<FunctionLibraryRuntime::Handle> fhandle_ TF_GUARDED_BY(mu_);
bool enable_large_batch_splitting_ = false;
bool has_attribute_enable_large_batch_splitting_ = false;
bool enable_adaptive_batch_threads_ = false;
mutex mu_;
struct AdaptiveBatchSchedulerOptions {
int32 min_in_flight_batches_limit = kMinInflightBatches;
int32 initial_in_flight_batches_limit = kInitialInflightBatches;
int32 max_in_flight_batches_limit = kMaxInflightBatches;
int32 batches_to_average_over = kBatchesToAverageOver;
int64 full_batch_scheduling_boost_micros = -1;
};
absl::optional<AdaptiveBatchSchedulerOptions>
adaptive_batch_scheduler_options_ = absl::nullopt;
};
}
#endif
#include "tensorflow/core/kernels/batch_kernels.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_resource_base.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/bounded_executor.h"
#include "tensorflow/core/kernels/batching_util/concat_split_util.h"
#include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include "tensorflow/core/kernels/batching_util/warmup.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/numbers.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
constexpr char kEnableAdaptiveSchedulerAttr[] = "_enable_adaptive_scheduler";
constexpr char kMinInflightBatchesAttr[] = "_min_inflight_batches";
constexpr char kInitialInflightBatchesAttr[] = "_initial_inflight_batches";
constexpr char kMaxInflightBatchesAttr[] = "_max_inflight_batches";
constexpr char kBatchesToAverageOverAttr[] = "_batches_to_average_over";
constexpr char kFullBatchSchedulingBoostMicros[] =
"_full_batch_scheduling_boost_micros";
constexpr int64_t kBatchThreadPoolSize = 128;
}
const int64_t kMinInflightBatches = 1;
const int64_t kInitialInflightBatches = 2;
const int64_t kBatchesToAverageOver = 10;
const int64_t kMaxInflightBatches = 64;
void RecordBatchSplitUsage(
std::optional<bool> maybe_enable_large_batch_splitting,
absl::string_view model_name) {
static auto* cell = monitoring::Gauge<std::string, 1>::New(
"/tensorflow/serving/batching/enable_large_batch_splitting",
"Tracks the usage of attribute `enable_large_batch_splitting` for "
"BatchFunction kernel in a saved model.",
"model_name");
if (maybe_enable_large_batch_splitting.has_value()) {
if (maybe_enable_large_batch_splitting.value()) {
cell->GetCell(std::string(model_name))->Set("true");
} else {
cell->GetCell(std::string(model_name))->Set("false");
}
} else {
cell->GetCell(std::string(model_name))->Set("unset");
}
}
void RecordBatchParamNumBatchThreads(int64_t num_batch_threads,
absl::string_view model_name) {
static auto* cell = monitoring::Gauge<int64_t, 1>::New(
"/tensorflow/serving/batching/num_batch_threads",
"Tracks the number of batch threads of a model.", "model_name");
cell->GetCell(std::string(model_name))->Set(num_batch_threads);
}
absl::string_view GetModelName(OpKernelContext* ctx) {
if (ctx->session_metadata() == nullptr ||
ctx->session_metadata()->name().empty()) {
return "model_name_unset";
}
return ctx->session_metadata()->name();
}
using ::tensorflow::concat_split_util::Concat;
using ::tensorflow::concat_split_util::Split;
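// Returns the value of TF_NUM_BATCH_THREADS from the environment, falling
// back to the given default when it is unset or not a parsable int32.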
int32 NumBatchThreadsFromEnvironmentWithDefault(int default_num_batch_threads) {
int32_t num;
const char* val = std::getenv("TF_NUM_BATCH_THREADS");
return (val && strings::safe_strto32(val, &num)) ? num
: default_num_batch_threads;
}
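// Lazily creates a process-wide thread pool that is shared by all adaptive
// batch schedulers. Returns nullptr if the bounded executor backing the pool
// could not be created.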
static thread::ThreadPool* GetOrCreateBatchThreadsPool() {
static thread::ThreadPool* shared_thread_pool = [&]() -> thread::ThreadPool* {
serving::BoundedExecutor::Options options;
options.num_threads =
NumBatchThreadsFromEnvironmentWithDefault(kBatchThreadPoolSize);
options.thread_name = std::string("adaptive_batch_threads");
auto status_or_executor = serving::BoundedExecutor::Create(options);
if (!status_or_executor.ok()) {
LOG(WARNING) << "Failed to create a batch threads pool with error "
<< status_or_executor.status();
return nullptr;
}
static serving::BoundedExecutor* executor =
status_or_executor.value().release();
return new thread::ThreadPool(executor);
}();
return shared_thread_pool;
}
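// Resource holding the shared batch scheduler and queue configuration for a
// BatchFunction op. Each batched task carries the function handle to execute.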
class BatchResource : public serving::BatchResourceBase {
public:
struct BatchTask : serving::BatchResourceBase::BatchTask {
FunctionLibraryRuntime::Handle fhandle;
explicit BatchTask(FunctionLibraryRuntime::Handle fhandle)
: fhandle(fhandle) {}
protected:
std::unique_ptr<serving::BatchResourceBase::BatchTask> CreateDerivedTask()
override {
return std::make_unique<BatchTask>(fhandle);
}
};
static Status Create(bool has_process_batch_function,
int32_t num_batch_threads,
int32_t max_execution_batch_size,
int32_t batch_timeout_micros,
int32_t max_enqueued_batches,
const std::vector<int32>& allowed_batch_sizes,
bool enable_large_batch_splitting,
std::unique_ptr<BatchResource>* resource) {
return Create(has_process_batch_function, num_batch_threads,
max_execution_batch_size, batch_timeout_micros,
max_enqueued_batches, allowed_batch_sizes,
                  /*low_priority_max_batch_size=*/0,
                  /*low_priority_batch_timeout_micros=*/0,
                  /*low_priority_max_enqueued_batches=*/0,
                  /*low_priority_allowed_batch_sizes=*/{},
serving::MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithMaxBatchSize,
enable_large_batch_splitting, resource);
}
static Status Create(
bool has_process_batch_function, int32_t num_batch_threads,
int32_t max_execution_batch_size, int32_t batch_timeout_micros,
int32_t max_enqueued_batches,
const std::vector<int32>& allowed_batch_sizes,
int32_t low_priority_max_batch_size,
int32_t low_priority_batch_timeout_micros,
int32_t low_priority_max_enqueued_batches,
const std::vector<int32>& low_priority_allowed_batch_sizes,
serving::MixedPriorityBatchingPolicy mixed_priority_batching_policy,
bool enable_large_batch_splitting,
std::unique_ptr<BatchResource>* resource) {
BatcherT::Options batcher_options;
batcher_options.num_batch_threads = num_batch_threads;
std::shared_ptr<BatcherT> batcher;
TF_RETURN_IF_ERROR(BatcherT::Create(batcher_options, &batcher));
resource->reset(new BatchResource(
has_process_batch_function, std::move(batcher),
GetBatcherQueueOptions(
num_batch_threads, max_execution_batch_size, batch_timeout_micros,
max_enqueued_batches, allowed_batch_sizes,
enable_large_batch_splitting,
false, low_priority_max_batch_size,
low_priority_batch_timeout_micros,
low_priority_max_enqueued_batches, low_priority_allowed_batch_sizes,
mixed_priority_batching_policy),
allowed_batch_sizes));
return absl::OkStatus();
}
static Status Create(
bool has_process_batch_function,
AdaptiveBatcherT::Options adaptive_shared_batch_scheduler_options,
int32_t max_batch_size, int32_t batch_timeout_micros,
int32_t max_enqueued_batches,
const std::vector<int32>& allowed_batch_sizes,
std::unique_ptr<BatchResource>* resource) {
std::shared_ptr<AdaptiveBatcherT> batcher;
TF_RETURN_IF_ERROR(AdaptiveBatcherT::Create(
adaptive_shared_batch_scheduler_options, &batcher));
resource->reset(new BatchResource(
has_process_batch_function, std::move(batcher),
GetAdaptiveBatcherQueueOptions(
max_batch_size, batch_timeout_micros, max_enqueued_batches,
true, allowed_batch_sizes,
false),
allowed_batch_sizes));
return absl::OkStatus();
}
string DebugString() const final { return "BatchResource"; }
private:
BatchResource(bool has_process_batch_function,
std::shared_ptr<BatcherT> batcher,
const BatcherT::QueueOptions& batcher_queue_options,
std::vector<int32> allowed_batch_sizes)
: BatchResourceBase(has_process_batch_function, std::move(batcher),
batcher_queue_options,
std::move(allowed_batch_sizes)) {}
BatchResource(bool has_process_batch_function,
std::shared_ptr<AdaptiveBatcherT> batcher,
const AdaptiveBatcherT::QueueOptions& batcher_queue_options,
std::vector<int32> allowed_batch_sizes)
: BatchResourceBase(has_process_batch_function, std::move(batcher),
batcher_queue_options,
std::move(allowed_batch_sizes)) {}
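  // Runs the instantiated function on the concatenated batch inputs, reusing
  // the execution options of the last task's context, and blocks until the
  // function finishes.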
void ProcessFuncBatchImpl(
const serving::BatchResourceBase::BatchTask& last_task,
absl::Span<const Tensor> inputs, std::vector<Tensor>* combined_outputs,
std::function<void(const Status&)> done) const override {
auto* last_task_context = last_task.context;
FunctionLibraryRuntime::Options opts;
opts.step_container = last_task_context->step_container();
opts.cancellation_manager = last_task_context->cancellation_manager();
opts.collective_executor = last_task_context->collective_executor();
opts.stats_collector = last_task_context->stats_collector();
opts.runner = last_task_context->runner();
opts.run_all_kernels_inline = last_task_context->run_all_kernels_inline();
Notification done_notif;
auto* flib = last_task_context->function_library();
FunctionLibraryRuntime::Handle fhandle =
down_cast<const BatchTask&>(last_task).fhandle;
flib->Run(opts, fhandle, inputs, combined_outputs,
[&](const Status& run_status) {
done(run_status);
done_notif.Notify();
});
done_notif.WaitForNotification();
}
};
BatchFunctionKernel::BatchFunctionKernel(OpKernelConstruction* c)
: AsyncOpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("container", &container_));
OP_REQUIRES_OK(c, c->GetAttr("shared_name", &shared_name_));
OP_REQUIRES_OK(c, c->GetAttr("batching_queue", &batcher_queue_));
OP_REQUIRES_OK(c, c->GetAttr("num_batch_threads", &num_batch_threads_));
OP_REQUIRES_OK(c, c->GetAttr("max_batch_size", &max_batch_size_));
OP_REQUIRES_OK(c, c->GetAttr("batch_timeout_micros", &batch_timeout_micros_));
OP_REQUIRES_OK(c, c->GetAttr("max_enqueued_batches", &max_enqueued_batches_));
OP_REQUIRES_OK(c, c->GetAttr("allowed_batch_sizes", &allowed_batch_sizes_));
OP_REQUIRES_OK(c, c->GetAttr("low_priority_max_batch_size",
&low_priority_max_batch_size_));
OP_REQUIRES_OK(c, c->GetAttr("low_priority_batch_timeout_micros",
&low_priority_batch_timeout_micros_));
OP_REQUIRES_OK(c, c->GetAttr("low_priority_allowed_batch_sizes",
&low_priority_allowed_batch_sizes_));
OP_REQUIRES_OK(c, c->GetAttr("low_priority_max_enqueued_batches",
&low_priority_max_enqueued_batches_));
OP_REQUIRES_OK(c,
c->GetAttr("mixed_priority_policy", &mixed_priority_policy_));
OP_REQUIRES_OK(c, c->GetAttr("batch_padding_policy", &batch_padding_policy_));
OP_REQUIRES_OK(c, c->GetAttr("f", &func_));
if (c->HasAttr("enable_large_batch_splitting")) {
OP_REQUIRES_OK(c, c->GetAttr("enable_large_batch_splitting",
&enable_large_batch_splitting_));
has_attribute_enable_large_batch_splitting_ = true;
}
SetAdaptiveBatchSchedulerOptions(c, num_batch_threads_);
if (!c->status().ok()) {
return;
}
if (enable_adaptive_batch_threads_) {
batcher_queue_ = name() + "/" + shared_name_ + batcher_queue_;
}
if (shared_name_.empty()) {
shared_name_ = name();
}
OP_REQUIRES_OK(c, ValidateAllowedBatchSizes());
}
bool BatchFunctionKernel::IsExpensive() { return false; }
void BatchFunctionKernel::ComputeAsync(OpKernelContext* c, DoneCallback done) {
RecordBatchSplitUsage(has_attribute_enable_large_batch_splitting_
? std::make_optional(enable_large_batch_splitting_)
: std::nullopt,
GetModelName(c));
RecordBatchParamNumBatchThreads(num_batch_threads_, GetModelName(c));
std::function<Status(BatchResource**)> creator;
FunctionLibraryRuntime::Handle handle;
OP_REQUIRES_OK_ASYNC(c, GetOrCreateFunctionHandle(c, &handle), done);
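  // Two creation paths for the shared resource: an adaptive shared batch
  // scheduler when adaptive options were configured, otherwise the standard
  // batcher with optional low-priority queue settings.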
if (adaptive_batch_scheduler_options_ != std::nullopt) {
creator = [this,
session_metadata = c->session_metadata()](BatchResource** r) {
serving::AdaptiveSharedBatchScheduler<
serving::BatchResourceBase::BatchTask>::Options
adaptive_shared_batch_scheduler_options;
adaptive_shared_batch_scheduler_options.thread_pool_name =
"adaptive_batch_threads";
adaptive_shared_batch_scheduler_options.thread_pool =
GetOrCreateBatchThreadsPool();
adaptive_shared_batch_scheduler_options.num_batch_threads = std::min(
NumBatchThreadsFromEnvironmentWithDefault(kBatchThreadPoolSize),
adaptive_batch_scheduler_options_->max_in_flight_batches_limit);
adaptive_shared_batch_scheduler_options.min_in_flight_batches_limit =
std::min(
NumBatchThreadsFromEnvironmentWithDefault(kBatchThreadPoolSize),
adaptive_batch_scheduler_options_->min_in_flight_batches_limit);
adaptive_shared_batch_scheduler_options
.initial_in_flight_batches_limit = std::min(
NumBatchThreadsFromEnvironmentWithDefault(kBatchThreadPoolSize),
adaptive_batch_scheduler_options_->initial_in_flight_batches_limit);
adaptive_shared_batch_scheduler_options.batches_to_average_over =
adaptive_batch_scheduler_options_->batches_to_average_over;
if (adaptive_batch_scheduler_options_
->full_batch_scheduling_boost_micros != -1) {
adaptive_shared_batch_scheduler_options
.full_batch_scheduling_boost_micros =
adaptive_batch_scheduler_options_
->full_batch_scheduling_boost_micros;
adaptive_shared_batch_scheduler_options.fifo_scheduling = false;
} else {
adaptive_shared_batch_scheduler_options.fifo_scheduling = true;
}
std::unique_ptr<BatchResource> new_resource;
TF_RETURN_IF_ERROR(BatchResource::Create(
          /*has_process_batch_function=*/true,
adaptive_shared_batch_scheduler_options, max_batch_size_,
batch_timeout_micros_, max_enqueued_batches_, allowed_batch_sizes_,
&new_resource));
if (session_metadata) {
new_resource->set_session_metadata(*session_metadata);
}
*r = new_resource.release();
return absl::OkStatus();
};
} else {
creator = [this,
session_metadata = c->session_metadata()](BatchResource** r) {
TF_ASSIGN_OR_RETURN(
serving::MixedPriorityBatchingPolicy mixed_priority_batching_policy,
serving::GetMixedPriorityBatchingPolicy(mixed_priority_policy_));
std::unique_ptr<BatchResource> new_resource;
TF_RETURN_IF_ERROR(BatchResource::Create(
          /*has_process_batch_function=*/true, num_batch_threads_,
max_batch_size_, batch_timeout_micros_, max_enqueued_batches_,
allowed_batch_sizes_, low_priority_max_batch_size_,
low_priority_batch_timeout_micros_,
low_priority_max_enqueued_batches_, low_priority_allowed_batch_sizes_,
mixed_priority_batching_policy, enable_large_batch_splitting_,
&new_resource));
if (session_metadata) {
new_resource->set_session_metadata(*session_metadata);
}
*r = new_resource.release();
return absl::OkStatus();
};
}
BatchResource* br;
OP_REQUIRES_OK_ASYNC(c,
c->resource_manager()->LookupOrCreate(
container_, shared_name_, &br, creator),
done);
const uint64_t guid = random::New64();
auto create_batch_task_fn =
[handle]() -> absl::StatusOr<
std::unique_ptr<serving::BatchResourceBase::BatchTask>> {
return {std::make_unique<BatchResource::BatchTask>(handle)};
};
Status status;
if (serving::ShouldWarmupAllBatchSizes(c)) {
status = br->RegisterWarmupInputs(guid, c, batcher_queue_,
create_batch_task_fn, done);
} else {
status =
br->RegisterInput(guid, c, batcher_queue_, create_batch_task_fn, done);
}
br->Unref();
OP_REQUIRES_OK_ASYNC(c, status, done);
}
Status BatchFunctionKernel::InstantiateFunction(
OpKernelContext* c, FunctionLibraryRuntime::Handle* handle) const {
FunctionLibraryRuntime* flib = c->function_library();
if (!flib) {
return errors::Internal("No function library");
}
FunctionLibraryRuntime::InstantiateOptions opts;
opts.target = flib->device() == nullptr ? "" : flib->device()->name();
opts.is_multi_device_function = true;
const ConfigProto* config = flib->config_proto();
if (config) {
opts.config_proto = *config;
}
Device* cpu_device;
TF_RETURN_IF_ERROR(flib->device_mgr()->LookupDevice("CPU:0", &cpu_device));
const FunctionDef* fdef =
flib->GetFunctionLibraryDefinition()->Find(func_.name());
if (!fdef) {
return errors::NotFound("Failed to find definition for function \"",
func_.name(), "\"");
}
OpInputList in_tensors;
TF_RETURN_IF_ERROR(c->input_list("in_tensors", &in_tensors));
for (int i = 0; i < in_tensors.size(); i++) {
if (in_tensors[i].dtype() == DT_RESOURCE) {
return errors::InvalidArgument(
"BatchFunction cannot take resource inputs but input ", i,
" is a resource.");
} else {
opts.input_devices.push_back(cpu_device->name());
}
}
OpInputList captured_tensors;
TF_RETURN_IF_ERROR(c->input_list("captured_tensors", &captured_tensors));
for (const Tensor& t : captured_tensors) {
if (t.dtype() == DT_RESOURCE) {
const ResourceHandle& rhandle = t.flat<ResourceHandle>()(0);
opts.input_devices.push_back(rhandle.device());
} else {
opts.input_devices.push_back(cpu_device->name());
}
}
const OpDef& signature = fdef->signature();
for (int i = 0; i < signature.output_arg_size(); i++) {
opts.output_devices.push_back(cpu_device->name());
}
if (opts.input_devices.size() != signature.input_arg_size()) {
return errors::InvalidArgument(
"Function takes ", signature.input_arg_size(), " argument(s) but ",
opts.input_devices.size(), " argument(s) were passed");
}
return flib->Instantiate(func_.name(), AttrSlice(&func_.attr()), opts,
handle);
}
Status BatchFunctionKernel::GetOrCreateFunctionHandle(
OpKernelContext* c, FunctionLibraryRuntime::Handle* handle) {
mutex_lock ml(mu_);
if (!fhandle_) {
TF_RETURN_IF_ERROR(InstantiateFunction(c, handle));
fhandle_ = *handle;
} else {
*handle = fhandle_.value();
}
return absl::OkStatus();
}
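// allowed_batch_sizes must be strictly increasing, and unless large batch
// splitting is enabled its last entry must equal max_batch_size.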
Status BatchFunctionKernel::ValidateAllowedBatchSizes() const {
if (allowed_batch_sizes_.empty()) {
return absl::OkStatus();
}
int32_t last_size = 0;
for (size_t i = 0; i < allowed_batch_sizes_.size(); ++i) {
const int32_t size = allowed_batch_sizes_.at(i);
if (i > 0 && size <= last_size) {
return errors::InvalidArgument(
"allowed_batch_sizes entries must be monotonically increasing");
}
if ((!enable_large_batch_splitting_) &&
(i == allowed_batch_sizes_.size() - 1) && (size != max_batch_size_)) {
return errors::InvalidArgument(
"final entry in allowed_batch_sizes must equal max_batch_size when "
"enable_large_batch_splitting is False");
}
last_size = size;
}
return absl::OkStatus();
}
void BatchFunctionKernel::SetAdaptiveBatchSchedulerOptions(
OpKernelConstruction* c, int32_t num_batch_threads) {
if (c->HasAttr(kEnableAdaptiveSchedulerAttr)) {
OP_REQUIRES_OK(c, c->GetAttr(kEnableAdaptiveSchedulerAttr,
&enable_adaptive_batch_threads_));
}
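  // A non-positive num_batch_threads implicitly enables adaptive batch
  // threads.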
if (num_batch_threads <= 0) {
enable_adaptive_batch_threads_ = true;
}
if (!enable_adaptive_batch_threads_) {
return;
}
AdaptiveBatchSchedulerOptions options;
if (c->HasAttr(kBatchesToAverageOverAttr)) {
OP_REQUIRES_OK(c, c->GetAttr(kBatchesToAverageOverAttr,
&options.batches_to_average_over));
}
if (c->HasAttr(kMinInflightBatchesAttr)) {
OP_REQUIRES_OK(c, c->GetAttr(kMinInflightBatchesAttr,
&options.min_in_flight_batches_limit));
}
if (c->HasAttr(kInitialInflightBatchesAttr)) {
OP_REQUIRES_OK(c, c->GetAttr(kInitialInflightBatchesAttr,
&options.initial_in_flight_batches_limit));
}
if (c->HasAttr(kMaxInflightBatchesAttr)) {
OP_REQUIRES_OK(c, c->GetAttr(kMaxInflightBatchesAttr,
&options.max_in_flight_batches_limit));
}
if (c->HasAttr(kFullBatchSchedulingBoostMicros)) {
OP_REQUIRES_OK(c, c->GetAttr(kFullBatchSchedulingBoostMicros,
&options.full_batch_scheduling_boost_micros));
}
thread::ThreadPool* thread_pool = GetOrCreateBatchThreadsPool();
OP_REQUIRES( | #include "tensorflow/core/kernels/batch_kernels.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/batch_kernel_test_util.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/warmup.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/version.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/criticality.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace {
using PerModelData = serving::WarmupStateRegistry::PerModelData;
class BatchFunctionKernelTest : public test_util::BatchFunctionKernelTestBase {
};
TEST_P(BatchFunctionKernelTest, EnableAdaptiveScheduler) {
const bool adaptive_scheduler_enabled = GetParam();
TF_EXPECT_OK(Init(adaptive_scheduler_enabled));
BatchFunctionKernel *batch_kernel =
dynamic_cast<BatchFunctionKernel *>(op_kernel());
EXPECT_EQ(adaptive_scheduler_enabled,
test_util::BatchFunctionKernelTestAccess(batch_kernel)
.enable_adaptive_batch_threads());
}
INSTANTIATE_TEST_SUITE_P(Params, BatchFunctionKernelTest, ::testing::Bool());
class SharedBatchFunctionTestState : public OpsTestBase {
public:
void CreateFunctionLibraryRuntime() {
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, flib_def_.get(), OptimizerOptions(),
nullptr, nullptr,
nullptr,
Rendezvous::Factory{[](const int64_t, const DeviceMgr *device_mgr,
tsl::core::RefCountPtr<Rendezvous> *r) {
*r = tsl::core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_mgr));
return absl::OkStatus();
}});
}
};
class BatchFunctionTestState : public SharedBatchFunctionTestState {
public:
absl::Status Init(Device *device, bool enable_low_priority_queue,
absl::string_view mixed_priority_policy,
int64_t expected_batch_size) {
device_ = device;
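    // The batched function wraps EnsureShape, so running the op asserts that
    // the batch handed to the function was padded to expected_batch_size.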
NameAttrList f;
f.set_name("ShapeEnforcingFunction");
FunctionDef func = FunctionDefHelper::Create(
f.name(),
{"x:int64"},
{"o:int64"},
{},
{{{"o"},
"EnsureShape",
{"x"},
{{"T", DataType::DT_INT64},
{"shape", TensorShape({expected_batch_size, 2})}}}},
{{"o", "o:output"}});
TF_RETURN_IF_ERROR(flib_def_->AddFunctionDef(func));
SharedBatchFunctionTestState::CreateFunctionLibraryRuntime();
std::vector<NodeDefBuilder::NodeOut> inputs(
{NodeDefBuilder::NodeOut({"n1", 0, DataType::DT_INT64})});
TF_RETURN_IF_ERROR(NodeDefBuilder("BatchTPUInput", "BatchFunction")
.Attr("max_batch_size", 8)
.Attr("num_batch_threads", 8)
.Attr("allowed_batch_sizes", {4, 8})
.Attr("batch_timeout_micros", 1000000)
.Attr("max_enqueued_batches", 10)
.Attr("enable_large_batch_splitting", true)
.Attr("low_priority_max_batch_size",
enable_low_priority_queue ? 8 : 0)
.Attr("low_priority_batch_timeout_micros",
enable_low_priority_queue ? 2000000 : 0)
.Attr("low_priority_allowed_batch_sizes",
enable_low_priority_queue
? std::vector<int>{4, 8}
: std::vector<int>())
.Attr("low_priority_max_enqueued_batches",
enable_low_priority_queue ? 2 : 0)
.Attr("mixed_priority_policy", mixed_priority_policy)
.Attr("batch_padding_policy", "PAD_UP")
.Attr("Tin", {DataType::DT_INT64})
.Input(inputs)
.Attr("Tcaptured", std::vector<DataType>{})
.Input(std::vector<NodeDefBuilder::NodeOut>{})
.Attr("Tout", std::vector<DataType>{DT_INT64})
.Attr("f", f)
.Finalize(node_def()));
return OpsTestBase::InitOp();
}
void TestBody() override {}
};
class BatchFunctionTest : public ::testing::TestWithParam<bool> {
protected:
void SetUp() override {
cpu_device_ =
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0");
}
std::unique_ptr<Device> cpu_device_;
};
TEST_P(BatchFunctionTest, BatchingWorksWithoutCriticality) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
bool enable_low_priority_queue = GetParam();
{
tsl::BlockingCounter blocking_counter(8);
for (int i = 0; i < 8; ++i) {
Env::Default()->SchedClosure([&]() {
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCritical);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
8));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
TEST_P(BatchFunctionTest, PaddingWorksWithoutCriticality) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
bool enable_low_priority_queue = GetParam();
{
tsl::BlockingCounter blocking_counter(2);
for (int i = 0; i < 2; ++i) {
Env::Default()->SchedClosure([&]() {
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCritical);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
#if defined(PLATFORM_GOOGLE)
TEST_P(BatchFunctionTest,
LowPriorityTaskPaddingHighPriorityBatchUptoMaxBatchSize) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
bool enable_low_priority_queue = GetParam();
{
tsl::BlockingCounter blocking_counter(8);
for (int i = 0; i < 4; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kCriticalPlus);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCriticalPlus);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
8));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
for (int i = 0; i < 4; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
8));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
TEST_P(BatchFunctionTest,
LowPriorityTaskPaddingHighPriorityBatchWithExtraPadding) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
bool enable_low_priority_queue = GetParam();
{
tsl::BlockingCounter blocking_counter(2);
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kCriticalPlus);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCriticalPlus);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(
test_state.Init(cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(
test_state.Init(cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
blocking_counter.Wait();
}
}
TEST_P(BatchFunctionTest,
LowPriorityTaskPaddingHighPriorityBatchUptoNextAllowedBatchSize) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
bool enable_low_priority_queue = GetParam();
{
tsl::BlockingCounter blocking_counter(4);
for (int i = 0; i < 2; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kCriticalPlus);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCriticalPlus);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
for (int i = 0; i < 2; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
#endif
INSTANTIATE_TEST_SUITE_P(BatchFunctionTest, BatchFunctionTest,
::testing::Bool());
#if defined(PLATFORM_GOOGLE)
TEST_F(BatchFunctionTest, HighPriorityBatchNotPaddedWithLowPriorityTasks) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
{
tsl::BlockingCounter blocking_counter(8);
for (int i = 0; i < 4; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kCriticalPlus);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCriticalPlus);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(cpu_device_.get(),
true,
serving::kPriorityIsolationAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
for (int i = 0; i < 4; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(cpu_device_.get(),
true,
serving::kPriorityIsolationAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
TEST_F(BatchFunctionTest, LowPriorityOnlyBatchAtMaxLowPriorityBatchSize) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
{
tsl::BlockingCounter blocking_counter(8);
for (int i = 0; i < 8; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(),
true,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
8));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
TEST_F(BatchFunctionTest, LowPriorityBatchPaddedToLowPriorityAllowedBatchSize) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
{
tsl::BlockingCounter blocking_counter(2);
for (int i = 0; i < 2; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(),
true,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
#endif
class BatchFunctionKernelParallelWarmupTestState
: public SharedBatchFunctionTestState {
public:
absl::Status Init(bool enable_splitting) {
static auto *const cpu_device = []() {
auto device =
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0");
return device.release();
}();
device_ = cpu_device;
NameAttrList f;
f.set_name("BatchFunctionKernelParallelWarmupTestStateFunc");
FunctionDef func = FunctionDefHelper::Create(
f.name(),
{"x:int64"},
{"o:int64"},
{},
{{{"o"},
"EnsureShape",
{"x"},
{{"T", DataType::DT_INT64}, {"shape", TensorShape({2})}}}},
{{"o", "o:output"}});
TF_RETURN_IF_ERROR(flib_def_->AddFunctionDef(func));
SharedBatchFunctionTestState::CreateFunctionLibraryRuntime();
std::vector<NodeDefBuilder::NodeOut> inputs(
{NodeDefBuilder::NodeOut({"n1", 0, DataType::DT_INT64})});
TF_RETURN_IF_ERROR(NodeDefBuilder("BatchTPUInput", "BatchFunction")
.Attr("max_batch_size", enable_splitting ? 16 : 8)
.Attr("num_batch_threads", 8)
.Attr("allowed_batch_sizes", {2, 4, 8})
.Attr("batch_timeout_micros", 1000000)
.Attr("max_enqueued_batches", 10)
.Attr("enable_large_batch_splitting", true)
.Attr("low_priority_max_batch_size", 64)
.Attr("low_priority_batch_timeout_micros", 8000)
.Attr("low_priority_allowed_batch_sizes", {32, 64})
.Attr("low_priority_max_enqueued_batches", 1000)
.Attr("batch_padding_policy", "PAD_UP")
.Attr("Tin", {DataType::DT_INT64})
.Input(inputs)
.Attr("Tcaptured", std::vector<DataType>{})
.Input(std::vector<NodeDefBuilder::NodeOut>{})
.Attr("Tout", std::vector<DataType>{DT_INT64})
.Attr("f", f)
.Finalize(node_def()));
return OpsTestBase::InitOp();
}
void TestBody() override {}
};
class BatchFunctionKernelParallelWarmupTest
: public ::testing::TestWithParam<bool> {};
TEST_P(BatchFunctionKernelParallelWarmupTest, ParallelWarmup) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
serving::WarmupStateRegistry::Key key(session_metadata.name(),
session_metadata.version());
int num_requests = 16;
bool enable_splitting = GetParam();
{
auto per_model_data = std::make_unique<PerModelData>();
auto handle = serving::GetGlobalWarmupStateRegistry().Register(
key, std::move(per_model_data));
tsl::BlockingCounter blocking_counter(num_requests);
for (int i = 0; i < num_requests; ++i) {
Env::Default()->SchedClosure([&]() {
BatchFunctionKernelParallelWarmupTestState test;
test.set_session_metadata(session_metadata);
TF_CHECK_OK(test.Init(enable_splitting));
test.AddInputFromList<int64_t>(TensorShape({2}), {123, 456});
TF_CHECK_OK(test.RunOpKernel());
test::ExpectTensorEqual<int64_t>(*test.GetOutput(0),
test::AsTensor<int64_t>({123, 456}));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
EXPECT_FALSE(serving::GetGlobalWarmupStateRegistry().Lookup(key));
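  // Second phase: with the warmup registry entry released, the kernel runs
  // are expected to fail (the batched shapes no longer satisfy EnsureShape).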
{
tsl::BlockingCounter blocking_counter(num_requests);
for (int i = 0; i < num_requests; ++i) {
Env::Default()->SchedClosure([&]() {
BatchFunctionKernelParallelWarmupTestState test;
test.set_session_metadata(session_metadata);
TF_CHECK_OK(test.Init(enable_splitting));
test.AddInputFromList<int64_t>(TensorShape({2}), {123, 456});
EXPECT_FALSE(test.RunOpKernel().ok());
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
INSTANTIATE_TEST_SUITE_P(BatchFunctionKernelParallelWarmupTestSuite,
BatchFunctionKernelParallelWarmupTest,
::testing::Bool());
}
} |
1,490 | cpp | tensorflow/tensorflow | parameterized_truncated_normal_op | tensorflow/core/kernels/parameterized_truncated_normal_op.cc | tensorflow/core/kernels/parameterized_truncated_normal_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_PARAMETERIZED_TRUNCATED_NORMAL_OP_H_
#define TENSORFLOW_CORE_KERNELS_PARAMETERIZED_TRUNCATED_NORMAL_OP_H_
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
class OpKernelContext;
namespace functor {
template <typename Device, typename T>
struct TruncatedNormalFunctor {
void operator()(OpKernelContext* ctx, const Device& d, int64_t num_batches,
int64_t samples_per_batch, int64_t num_elements,
typename TTypes<T>::ConstFlat means,
typename TTypes<T>::ConstFlat stddevs,
typename TTypes<T>::ConstFlat minvals,
typename TTypes<T>::ConstFlat maxvals,
const random::PhiloxRandom& gen,
typename TTypes<T>::Flat output);
};
template <typename Device, typename T>
struct TruncatedNormalFunctorV2 {
void operator()(OpKernelContext* ctx, const Device& d, int64_t num_batches,
int64_t samples_per_batch, int64_t num_elements,
const BCastList<4>& bcast,
typename TTypes<T>::ConstFlat means,
typename TTypes<T>::ConstFlat stddevs,
typename TTypes<T>::ConstFlat minvals,
typename TTypes<T>::ConstFlat maxvals,
const random::PhiloxRandom& gen,
typename TTypes<T>::Flat output);
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/parameterized_truncated_normal_op.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/kernels/stateless_random_ops.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/guarded_philox_random.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace functor {
using random::PhiloxRandom;
static constexpr int kMaxIterations = 1000;
template <typename T>
struct TruncatedNormalFunctor<CPUDevice, T> {
void operator()(OpKernelContext* ctx, const CPUDevice& d, int64_t num_batches,
int64_t samples_per_batch, int64_t num_elements,
typename TTypes<T>::ConstFlat means,
typename TTypes<T>::ConstFlat stddevs,
typename TTypes<T>::ConstFlat minvals,
typename TTypes<T>::ConstFlat maxvals,
const random::PhiloxRandom& gen,
typename TTypes<T>::Flat output) {
const T kStdDevsInsideBoundsToUseRandnSampler = T(1.3);
auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
auto do_work = [samples_per_batch, num_elements, &ctx, &means, &stddevs,
&minvals, &maxvals, &gen, &output,
kStdDevsInsideBoundsToUseRandnSampler](
int64_t start_batch, int64_t limit_batch) {
random::PhiloxRandom gen_copy = gen;
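      // Advance the generator so each shard of batches draws from a disjoint
      // part of the Philox stream, budgeted for the worst case of
      // kMaxIterations rejections per sample.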
gen_copy.Skip(start_batch * 2 * kMaxIterations * (samples_per_batch + 3) /
4);
using Uniform = random::UniformDistribution<random::PhiloxRandom, T>;
Uniform dist;
using Normal = random::NormalDistribution<random::PhiloxRandom, T>;
Normal normal_dist;
Eigen::array<T, 4> z;
Eigen::array<T, 4> g;
for (int64_t b = start_batch; b < limit_batch; ++b) {
T mean = means((means.dimension(0) == 1) ? 0 : b);
T stddev = stddevs((stddevs.dimension(0) == 1) ? 0 : b);
T minval = minvals((minvals.dimension(0) == 1) ? 0 : b);
T maxval = maxvals((maxvals.dimension(0) == 1) ? 0 : b);
const int64_t limit_sample =
std::min((b + 1) * samples_per_batch, num_elements);
int64_t sample = b * samples_per_batch;
OP_REQUIRES(ctx,
stddev > T(0) && minval < maxval &&
(Eigen::numext::isfinite(minval) ||
Eigen::numext::isfinite(maxval)),
errors::InvalidArgument("Invalid parameters"));
int num_iterations = 0;
if ((Eigen::numext::isinf(minval) && minval < T(0)) || maxval < mean) {
std::swap(minval, maxval);
stddev = -stddev;
}
const T normMin = (minval - mean) / stddev;
const T normMax = (maxval - mean) / stddev;
const T sqrtFactor = Eigen::numext::sqrt((normMin * normMin) + T(4));
const T cutoff =
T(2) *
Eigen::numext::exp(T(0.5) +
(normMin * (normMin - sqrtFactor)) / T(4)) /
(normMin + sqrtFactor);
const T diff = normMax - normMin;
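        // Choose a sampler from the standardized bounds: plain normal
        // rejection when the interval spans the mean with enough width,
        // uniform rejection when the interval is narrow, and exponential
        // (one-sided tail) rejection otherwise.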
if (((normMin < -kStdDevsInsideBoundsToUseRandnSampler) &&
(normMax >= T(0.))) ||
((normMax > kStdDevsInsideBoundsToUseRandnSampler) &&
(normMin <= T(0.)))) {
while (sample < limit_sample) {
const auto randn_sample = normal_dist(&gen_copy);
const int size = randn_sample.size();
for (int i = 0; i < size; i++) {
if ((randn_sample[i] >= normMin) &&
(randn_sample[i] <= normMax)) {
output(sample) = randn_sample[i] * stddev + mean;
sample++;
if (sample >= limit_sample) {
break;
}
num_iterations = 0;
} else {
num_iterations++;
if (num_iterations > kMaxIterations) {
LOG(ERROR) << "TruncatedNormal randn rejection sampler "
<< "exceeded maximum iterations for "
<< "normMin=" << normMin << " normMax=" << normMax
<< " kMaxIterations=" << kMaxIterations;
ctx->SetStatus(errors::Internal(
"TruncatedNormal randn rejection sampler failed to accept"
" a sample."));
return;
}
}
}
}
} else if (diff < cutoff) {
const T plusFactor = (normMin < T(0)) ? T(0) : normMin * normMin;
while (sample < limit_sample) {
const auto rand = dist(&gen_copy);
const int size = rand.size();
for (int i = 0; i < size; i++) {
z[i] = rand[i] * diff + normMin;
}
for (int i = 0; i < size; i++) {
g[i] = (plusFactor - z[i] * z[i]) / T(2.0);
}
const auto u = dist(&gen_copy);
for (int i = 0; i < size; i++) {
auto accept = u[i] <= Eigen::numext::exp(g[i]);
if (accept || num_iterations + 1 >= kMaxIterations) {
if (!accept) {
LOG(ERROR) << "TruncatedNormal uniform rejection sampler "
<< "exceeded max iterations. Sample may contain "
<< "outliers.";
ctx->SetStatus(errors::Internal(
"TruncatedNormal uniform rejection sampler failed to "
" accept a sample."));
return;
}
output(sample) = z[i] * stddev + mean;
sample++;
if (sample >= limit_sample) {
break;
}
num_iterations = 0;
} else {
num_iterations++;
}
}
}
} else {
const T alpha =
(normMin + Eigen::numext::sqrt((normMin * normMin) + T(4))) /
T(2);
while (sample < limit_sample) {
auto rand = dist(&gen_copy);
const int size = rand.size();
int i = 0;
while (i < size) {
const T z = -Eigen::numext::log(rand[i]) / alpha + normMin;
i++;
const T x = normMin < alpha ? alpha - z : normMin - alpha;
const T g = Eigen::numext::exp(-x * x / T(2.0));
const T u = rand[i];
i++;
auto accept = (u <= g && z < normMax);
if (accept || num_iterations + 1 >= kMaxIterations) {
if (!accept) {
LOG(ERROR) << "TruncatedNormal exponential distribution "
<< "rejection sampler exceeds max iterations. "
<< "Sample may contain outliers.";
ctx->SetStatus(errors::Internal(
"TruncatedNormal exponential distribution rejection"
" sampler failed to accept a sample."));
return;
}
output(sample) = z * stddev + mean;
sample++;
if (sample >= limit_sample) {
break;
}
num_iterations = 0;
} else {
num_iterations++;
}
}
}
}
}
};
const int64_t batchInitCost =
(Eigen::TensorOpCost::AddCost<T>() +
Eigen::TensorOpCost::MulCost<T>()) *
2
+ Eigen::TensorOpCost::AddCost<T>() +
Eigen::TensorOpCost::MulCost<T>() +
Eigen::internal::functor_traits<
Eigen::internal::scalar_sqrt_op<T>>::Cost
+ Eigen::TensorOpCost::MulCost<T>() * 4 +
Eigen::internal::functor_traits<Eigen::internal::scalar_exp_op<T>>::Cost
+ Eigen::TensorOpCost::AddCost<T>();
const int64_t uniformSampleCost =
random::PhiloxRandom::kElementCost +
random::UniformDistribution<random::PhiloxRandom, T>::kElementCost;
const int64_t uniformRejectionSamplingCost =
uniformSampleCost + Eigen::TensorOpCost::MulCost<T>() +
Eigen::TensorOpCost::AddCost<T>() +
Eigen::TensorOpCost::MulCost<T>() * 2 +
Eigen::TensorOpCost::AddCost<T>() + uniformSampleCost +
Eigen::internal::functor_traits<
Eigen::internal::scalar_exp_op<T>>::Cost +
Eigen::TensorOpCost::MulCost<T>() + Eigen::TensorOpCost::AddCost<T>();
const int64_t batchCost =
batchInitCost + uniformRejectionSamplingCost * 2 * samples_per_batch;
Shard(worker_threads.num_threads, worker_threads.workers, num_batches,
batchCost, do_work);
}
};
template <typename T>
struct TruncatedNormalFunctorV2<CPUDevice, T> {
void operator()(OpKernelContext* ctx, const CPUDevice& d, int64_t num_batches,
int64_t samples_per_batch, int64_t num_elements,
const BCastList<4>& bcast,
typename TTypes<T>::ConstFlat means,
typename TTypes<T>::ConstFlat stddevs,
typename TTypes<T>::ConstFlat minvals,
typename TTypes<T>::ConstFlat maxvals,
const random::PhiloxRandom& gen,
typename TTypes<T>::Flat output) {
const T kStdDevsInsideBoundsToUseRandnSampler = T(1.3);
auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
auto do_work = [num_batches, samples_per_batch, &ctx, &bcast, &means,
&stddevs, &minvals, &maxvals, &gen, &output,
kStdDevsInsideBoundsToUseRandnSampler](
int64_t start_output, int64_t limit_output) {
random::PhiloxRandom gen_copy = gen;
using Uniform = random::UniformDistribution<random::PhiloxRandom, T>;
Uniform dist;
using Normal = random::NormalDistribution<random::PhiloxRandom, T>;
Normal normal_dist;
gen_copy.Skip((start_output * 2 * kMaxIterations +
Uniform::kResultElementCount - 1) /
Uniform::kResultElementCount);
Eigen::array<T, Uniform::kResultElementCount> z;
Eigen::array<T, Uniform::kResultElementCount> g;
const bool should_bcast = bcast.IsBroadcastingRequired();
const auto& means_batch_indices = bcast.batch_indices(0);
const auto& stddevs_batch_indices = bcast.batch_indices(1);
const auto& minvals_batch_indices = bcast.batch_indices(2);
const auto& maxvals_batch_indices = bcast.batch_indices(3);
auto output_flat = output.data();
      // output_idx is advanced together with sample_idx in the branches below.
      for (int64_t output_idx = start_output; output_idx < limit_output;) {
int64_t batch_idx = output_idx / samples_per_batch;
T* const output_batch_offset = output_flat + batch_idx;
T mean, stddev, minval, maxval;
if (should_bcast) {
mean = means(means_batch_indices[batch_idx]);
stddev = stddevs(stddevs_batch_indices[batch_idx]);
minval = minvals(minvals_batch_indices[batch_idx]);
maxval = maxvals(maxvals_batch_indices[batch_idx]);
} else {
mean = means(batch_idx);
stddev = stddevs(batch_idx);
minval = minvals(batch_idx);
maxval = maxvals(batch_idx);
}
OP_REQUIRES(ctx,
stddev > T(0) && minval < maxval &&
(Eigen::numext::isfinite(minval) ||
Eigen::numext::isfinite(maxval)),
errors::InvalidArgument("Invalid parameters"));
int num_iterations = 0;
if ((Eigen::numext::isinf(minval) && minval < T(0)) || maxval < mean) {
std::swap(minval, maxval);
stddev = -stddev;
}
const T normMin = (minval - mean) / stddev;
const T normMax = (maxval - mean) / stddev;
const T sqrtFactor = Eigen::numext::sqrt((normMin * normMin) + T(4));
const T cutoff =
T(2) *
Eigen::numext::exp(T(0.5) +
(normMin * (normMin - sqrtFactor)) / T(4)) /
(normMin + sqrtFactor);
const T diff = normMax - normMin;
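        // Same three-way sampler choice as the non-broadcasting functor above.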
if (((normMin < -kStdDevsInsideBoundsToUseRandnSampler) &&
(normMax >= T(0.))) ||
((normMax > kStdDevsInsideBoundsToUseRandnSampler) &&
(normMin <= T(0.)))) {
for (int64_t sample_idx = output_idx % samples_per_batch;
sample_idx < samples_per_batch && output_idx < limit_output;) {
const auto randn_sample = normal_dist(&gen_copy);
const int size = randn_sample.size();
for (int i = 0; i < size; ++i) {
if ((randn_sample[i] >= normMin) &&
(randn_sample[i] <= normMax)) {
output_batch_offset[sample_idx * num_batches] =
randn_sample[i] * stddev + mean;
++sample_idx;
++output_idx;
if (sample_idx >= samples_per_batch ||
output_idx >= limit_output) {
break;
}
num_iterations = 0;
} else {
++num_iterations;
if (num_iterations > kMaxIterations) {
LOG(ERROR) << "TruncatedNormal randn rejection sampler "
<< "exceeded maximum iterations for "
<< "normMin=" << normMin << " normMax=" << normMax
<< " kMaxIterations=" << kMaxIterations;
ctx->SetStatus(errors::Internal(
"TruncatedNormal randn rejection sampler failed to accept"
" a sample."));
return;
}
}
}
}
} else if (diff < cutoff) {
const T plusFactor = (normMin < T(0)) ? T(0) : normMin * normMin;
for (int64_t sample_idx = output_idx % samples_per_batch;
sample_idx < samples_per_batch && output_idx < limit_output;) {
const auto rand = dist(&gen_copy);
const int size = rand.size();
for (int i = 0; i < size; i++) {
z[i] = rand[i] * diff + normMin;
g[i] = (plusFactor - z[i] * z[i]) / T(2.0);
}
const auto u = dist(&gen_copy);
for (int i = 0; i < size; i++) {
auto accept = u[i] <= Eigen::numext::exp(g[i]);
if (accept || num_iterations + 1 >= kMaxIterations) {
if (!accept) {
LOG(ERROR) << "TruncatedNormal uniform rejection sampler "
<< "exceeded max iterations. Sample may contain "
<< "outliers.";
ctx->SetStatus(errors::Internal(
"TruncatedNormal uniform rejection sampler failed to "
" accept a sample."));
return;
}
output_batch_offset[sample_idx * num_batches] =
z[i] * stddev + mean;
++sample_idx;
++output_idx;
if (sample_idx >= samples_per_batch ||
output_idx >= limit_output) {
break;
}
num_iterations = 0;
} else {
num_iterations++;
}
}
}
} else {
const T alpha =
(normMin + Eigen::numext::sqrt((normMin * normMin) + T(4))) /
T(2);
for (int64_t sample_idx = output_idx % samples_per_batch;
sample_idx < samples_per_batch && output_idx < limit_output;) {
auto rand = dist(&gen_copy);
const int size = rand.size();
int i = 0;
while (i < size) {
const T z = -Eigen::numext::log(rand[i]) / alpha + normMin;
i++;
const T x = normMin < alpha ? alpha - z : normMin - alpha;
const T g = Eigen::numext::exp(-x * x / T(2.0));
const T u = rand[i];
i++;
auto accept = (u <= g && z < normMax);
if (accept || num_iterations + 1 >= kMaxIterations) {
if (!accept) {
LOG(ERROR) << "TruncatedNormal exponential distribution "
<< "rejection sampler exceeds max iterations. "
<< "Sample may contain outliers.";
ctx->SetStatus(errors::Internal(
"TruncatedNormal exponential distribution rejection"
" sampler failed to accept a sample."));
return;
}
output_batch_offset[sample_idx * num_batches] =
z * stddev + mean;
++sample_idx;
++output_idx;
if (sample_idx >= samples_per_batch ||
output_idx >= limit_output) {
break;
}
num_iterations = 0;
} else {
num_iterations++;
}
}
}
}
}
};
  // Rough cost (in Eigen cost-model units) of the per-batch setup work,
  // used below to decide how to shard batches across threads.
  const int64_t batchInitCost =
      // normMin, normMax
      (Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>()) *
          2 +
      // sqrtFactor
      Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>() +
      Eigen::internal::functor_traits<
          Eigen::internal::scalar_sqrt_op<T>>::Cost +
      // cutoff
      Eigen::TensorOpCost::MulCost<T>() * 4 +
      Eigen::internal::functor_traits<Eigen::internal::scalar_exp_op<T>>::Cost +
      // diff
      Eigen::TensorOpCost::AddCost<T>();
const int64_t uniformSampleCost =
random::PhiloxRandom::kElementCost +
random::UniformDistribution<random::PhiloxRandom, T>::kElementCost;
const int64_t uniformRejectionSamplingCost =
uniformSampleCost + Eigen::TensorOpCost::MulCost<T>() +
Eigen::TensorOpCost::AddCost<T>() +
Eigen::TensorOpCost::MulCost<T>() * 2 +
Eigen::TensorOpCost::AddCost<T>() + uniformSampleCost +
Eigen::internal::functor_traits<
Eigen::internal::scalar_exp_op<T>>::Cost +
Eigen::TensorOpCost::MulCost<T>() + Eigen::TensorOpCost::AddCost<T>();
const int64_t batchCost = batchInitCost + uniformRejectionSamplingCost * 2;
Shard(worker_threads.num_threads, worker_threads.workers, num_elements,
batchCost, do_work);
}
};
}
namespace {
template <typename Device, typename T>
class ParameterizedTruncatedNormalOp : public OpKernel {
static constexpr int32_t kDesiredBatchSize = 100;
public:
explicit ParameterizedTruncatedNormalOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, generator_.Init(context));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& shape_tensor = ctx->input(0);
const Tensor& means_tensor = ctx->input(1);
const Tensor& stddevs_tensor = ctx->input(2);
const Tensor& minvals_tensor = ctx->input(3);
const Tensor& maxvals_tensor = ctx->input(4);
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(shape_tensor.shape()),
errors::InvalidArgument("Input shape should be a vector, got shape: ",
shape_tensor.shape().DebugString()));
OP_REQUIRES(ctx, shape_tensor.NumElements() > 0,
errors::InvalidArgument("Shape tensor must not be empty, got ",
shape_tensor.DebugString()));
TensorShape tensor_shape;
OP_REQUIRES_OK(ctx, tensor::MakeShape(shape_tensor, &tensor_shape));
int32_t num_batches = tensor_shape.dim_size(0);
int32_t samples_per_batch = 1;
const int32_t num_dims = tensor_shape.dims();
for (int32_t i = 1; i < num_dims; i++) {
samples_per_batch *= tensor_shape.dim_size(i);
}
const int32_t num_elements = num_batches * samples_per_batch;
Tensor* samples_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, tensor_shape, &samples_tensor));
OP_REQUIRES(ctx, means_tensor.dims() <= 1,
errors::InvalidArgument(
"Input means should be a scalar or vector, got shape: ",
means_tensor.shape().DebugString()));
OP_REQUIRES(ctx, stddevs_tensor.dims() <= 1,
errors::InvalidArgument(
"Input stddevs should be a scalar or vector, got shape: ", | #include <functional>
#include <limits>
#include <memory>
#include <vector>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
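// Benchmark graph builders: each constructs a single
// ParameterizedTruncatedNormal node with per-batch means and stddevs and the
// given truncation bounds.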
static Graph* PTruncatedNormal(int num_batches, int samples_per_batch) {
Graph* g = new Graph(OpRegistry::Global());
Tensor shape_t(DT_INT32, TensorShape({2}));
shape_t.flat<int32>().setValues({num_batches, samples_per_batch});
Tensor means_t(DT_FLOAT, TensorShape({num_batches}));
means_t.flat<float>().setConstant(0.0);
Tensor stdevs_t(DT_FLOAT, TensorShape({num_batches}));
stdevs_t.flat<float>().setConstant(1.0);
Tensor minvals_t(DT_FLOAT, TensorShape({num_batches}));
minvals_t.flat<float>().setRandom();
Tensor maxvals_t(DT_FLOAT, TensorShape({num_batches}));
maxvals_t.flat<float>().setConstant(5.0);
Node* ret;
TF_CHECK_OK(
NodeBuilder(g->NewName("truncatednormal"), "ParameterizedTruncatedNormal")
.Input(test::graph::Constant(g, shape_t))
.Input(test::graph::Constant(g, means_t))
.Input(test::graph::Constant(g, stdevs_t))
.Input(test::graph::Constant(g, minvals_t))
.Input(test::graph::Constant(g, maxvals_t))
.Attr("dtype", DT_FLOAT)
.Finalize(g, &ret));
return g;
}
static Graph* PTruncatedNormal2SD(int num_batches, int samples_per_batch) {
Graph* g = new Graph(OpRegistry::Global());
Tensor shape_t(DT_INT32, TensorShape({2}));
shape_t.flat<int32>().setValues({num_batches, samples_per_batch});
Tensor means_t(DT_FLOAT, TensorShape({num_batches}));
means_t.flat<float>().setConstant(0.0);
Tensor stdevs_t(DT_FLOAT, TensorShape({num_batches}));
stdevs_t.flat<float>().setConstant(1.0);
Tensor minvals_t(DT_FLOAT, TensorShape({num_batches}));
minvals_t.flat<float>().setConstant(-2.0);
Tensor maxvals_t(DT_FLOAT, TensorShape({num_batches}));
maxvals_t.flat<float>().setConstant(2.0);
Node* ret;
TF_CHECK_OK(
NodeBuilder(g->NewName("truncatednormal"), "ParameterizedTruncatedNormal")
.Input(test::graph::Constant(g, shape_t))
.Input(test::graph::Constant(g, means_t))
.Input(test::graph::Constant(g, stdevs_t))
.Input(test::graph::Constant(g, minvals_t))
.Input(test::graph::Constant(g, maxvals_t))
.Attr("dtype", DT_FLOAT)
.Finalize(g, &ret));
return g;
}
static Graph* PTruncatedNormalOneTail(int num_batches, int samples_per_batch) {
Graph* g = new Graph(OpRegistry::Global());
Tensor shape_t(DT_INT32, TensorShape({2}));
shape_t.flat<int32>().setValues({num_batches, samples_per_batch});
Tensor means_t(DT_FLOAT, TensorShape({num_batches}));
means_t.flat<float>().setConstant(0.0);
Tensor stdevs_t(DT_FLOAT, TensorShape({num_batches}));
stdevs_t.flat<float>().setConstant(1.0);
Tensor minvals_t(DT_FLOAT, TensorShape({num_batches}));
minvals_t.flat<float>().setConstant(2.0);
Tensor maxvals_t(DT_FLOAT, TensorShape({num_batches}));
maxvals_t.flat<float>().setConstant(std::numeric_limits<float>::infinity());
Node* ret;
TF_CHECK_OK(
NodeBuilder(g->NewName("truncatednormal"), "ParameterizedTruncatedNormal")
.Input(test::graph::Constant(g, shape_t))
.Input(test::graph::Constant(g, means_t))
.Input(test::graph::Constant(g, stdevs_t))
.Input(test::graph::Constant(g, minvals_t))
.Input(test::graph::Constant(g, maxvals_t))
.Attr("dtype", DT_FLOAT)
.Finalize(g, &ret));
return g;
}
#define BM_PTruncatedNormalDev(DEVICE, B, S) \
static void BM_PTruncatedNormal_##DEVICE##_##B##_##S( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, PTruncatedNormal(B, S), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \
} \
BENCHMARK(BM_PTruncatedNormal_##DEVICE##_##B##_##S);
#define BM_PTruncatedNormalDev_2SD(DEVICE, B, S) \
static void BM_PTruncatedNormal_2SD_##DEVICE##_##B##_##S( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, PTruncatedNormal2SD(B, S), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \
} \
BENCHMARK(BM_PTruncatedNormal_2SD_##DEVICE##_##B##_##S);
#define BM_PTruncatedNormalDev_OneTail(DEVICE, B, S) \
static void BM_PTruncatedNormal_OneTail_##DEVICE##_##B##_##S( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, PTruncatedNormalOneTail(B, S), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \
} \
BENCHMARK(BM_PTruncatedNormal_OneTail_##DEVICE##_##B##_##S);
BM_PTruncatedNormalDev(cpu, 1000, 1000);
BM_PTruncatedNormalDev_2SD(cpu, 10000, 100);
BM_PTruncatedNormalDev_OneTail(cpu, 10000, 100);
BM_PTruncatedNormalDev(gpu, 1000, 1000);
BM_PTruncatedNormalDev_2SD(gpu, 10000, 100);
BM_PTruncatedNormalDev_OneTail(gpu, 10000, 100);
} |
1,491 | cpp | tensorflow/tensorflow | tensor_map | tensorflow/core/kernels/tensor_map.cc | tensorflow/core/kernels/tensor_map_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_TENSOR_MAP_H_
#define TENSORFLOW_CORE_KERNELS_TENSOR_MAP_H_
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_key.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/lib/core/refcount.h"
namespace tensorflow {
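// Reference-counted map from TensorKey to Tensor. Copy construction and
// assignment share the underlying hash map; Copy() clones the map structure,
// while the stored Tensors still share their buffers.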
class TensorMap {
public:
TensorMap() : tensors_(new Tensors) {}
~TensorMap();
TensorMap(const TensorMap& other) : tensors_(other.tensors_) {
tensors_->Ref();
}
TensorMap(TensorMap&& rhs) : tensors_(rhs.tensors_) {
rhs.tensors_ = nullptr;
}
TensorMap& operator=(const TensorMap& rhs) {
if (this == &rhs) return *this;
tensors_->Unref();
tensors_ = rhs.tensors_;
tensors_->Ref();
return *this;
}
TensorMap& operator=(TensorMap&& rhs) {
if (this == &rhs) return *this;
std::swap(tensors_, rhs.tensors_);
return *this;
}
static const char kTypeName[];
string TypeName() const { return kTypeName; }
void Encode(VariantTensorData* data) const;
bool Decode(const VariantTensorData& data);
string DebugString() const { return "TensorMap"; }
absl::flat_hash_map<TensorKey, Tensor>& tensors() {
return tensors_->values_;
}
const absl::flat_hash_map<TensorKey, Tensor>& tensors() const {
return tensors_->values_;
}
TensorMap Copy() const {
TensorMap out;
out.tensors_->values_ = tensors_->values_;
return out;
}
bool insert(const TensorKey& key, const Tensor& value) {
auto r = tensors_->values_.try_emplace(key, value);
return r.second;
}
absl::flat_hash_map<TensorKey, Tensor>::iterator find(TensorKey key) {
return tensors_->values_.find(key);
}
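  // Note: lookup() assumes the key is present; calling it with a missing key
  // dereferences end() and is undefined behavior. Use find() to test first.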
Tensor& lookup(TensorKey key) { return tensors_->values_.find(key)->second; }
Tensor& operator[](TensorKey& k) { return tensors_->values_[k]; }
bool replace(const TensorKey& k, const Tensor& v) {
tensors_->values_[k] = v;
return true;
}
size_t erase(TensorKey key) { return tensors_->values_.erase(key); }
size_t size() const { return tensors_->values_.size(); }
std::vector<Tensor> keys() const {
std::vector<Tensor> keys;
keys.reserve(tensors_->values_.size());
absl::flat_hash_map<TensorKey, Tensor>::iterator it =
tensors_->values_.begin();
while (it != tensors_->values_.end()) {
keys.push_back(it->first);
it++;
}
return keys;
}
bool RefCountIsOne() const { return tensors_->RefCountIsOne(); }
private:
class Tensors : public core::RefCounted {
public:
absl::flat_hash_map<TensorKey, Tensor> values_;
};
Tensors* tensors_;
};
#if defined(PLATFORM_GOOGLE)
static_assert(Variant::CanInlineType<TensorMap>() || sizeof(void*) < 8,
"Must be able to inline TensorMap into a Variant");
#endif
}
#endif
#include "tensorflow/core/kernels/tensor_map.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/lib/core/coding.h"
namespace tensorflow {
TensorMap::~TensorMap() {
if (tensors_) tensors_->Unref();
}
void TensorMap::Encode(VariantTensorData* data) const {
data->set_type_name(TypeName());
absl::flat_hash_map<TensorKey, Tensor>::const_iterator map_it =
tensors().begin();
while (map_it != tensors().end()) {
Tensor k = map_it->first;
Tensor v = map_it->second;
CHECK_NE(k.dtype(), DT_INVALID);
CHECK_NE(v.dtype(), DT_INVALID);
*data->add_tensors() = k;
*data->add_tensors() = v;
map_it++;
}
}
static Status TensorMapDeviceCopy(
const TensorMap& from, TensorMap* to,
const UnaryVariantOpRegistry::AsyncTensorDeviceCopyFn& copy) {
for (const std::pair<TensorKey, Tensor>& p : from.tensors()) {
TensorKey to_key(p.first.dtype());
Tensor to_val(p.second.dtype());
TF_RETURN_IF_ERROR(copy(p.first, &to_key));
TF_RETURN_IF_ERROR(copy(p.second, &to_val));
to->tensors().emplace(to_key, to_val);
}
return absl::OkStatus();
}
#define REGISTER_LIST_COPY(DIRECTION) \
INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION(TensorMap, DIRECTION, \
TensorMapDeviceCopy)
REGISTER_LIST_COPY(VariantDeviceCopyDirection::HOST_TO_DEVICE);
REGISTER_LIST_COPY(VariantDeviceCopyDirection::DEVICE_TO_HOST);
REGISTER_LIST_COPY(VariantDeviceCopyDirection::DEVICE_TO_DEVICE);
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(TensorMap, TensorMap::kTypeName);
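// The map is serialized as an alternating key, value, key, value, ... tensor
// sequence; Decode returns false if the sequence has odd length.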
bool TensorMap::Decode(const VariantTensorData& data) {
std::vector<Tensor>::const_iterator tensors_it = data.tensors().begin();
while (tensors_it != data.tensors().end()) {
if (std::next(tensors_it) == data.tensors().end()) {
return false;
}
tensors().emplace(tensors_it[0], tensors_it[1]);
tensors_it += 2;
}
return true;
}
const char TensorMap::kTypeName[] = "tensorflow::TensorMap";
} | #include "tensorflow/core/kernels/tensor_map.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
TEST(TensorMapTest, Empty) {
TensorMap tm;
EXPECT_EQ(tm.tensors().size(), 0);
EXPECT_EQ(tm.tensors().begin(), tm.tensors().end());
}
TEST(TensorKeyTest, Equal) {
TensorKey k1 = Tensor(15);
TensorKey k2 = Tensor(15);
EXPECT_EQ(k1, k2);
EXPECT_EQ(k1.shape(), k2.shape());
EXPECT_EQ(k1.dtype(), k2.dtype());
TensorKey k3 = Tensor(37.0);
EXPECT_NE(k1, k3);
EXPECT_NE(k1.dtype(), k3.dtype());
}
TEST(TensorMapTest, Insert) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v = Tensor(22);
tm.insert(k, v);
absl::flat_hash_map<TensorKey, Tensor> am;
am.try_emplace(k, v);
absl::flat_hash_map<TensorKey, Tensor>::iterator map_it =
tm.tensors().begin();
EXPECT_EQ(map_it->first, k);
test::ExpectTensorEqual<int32>(map_it->second, v);
map_it++;
EXPECT_EQ(map_it, tm.tensors().end());
}
TEST(TensorMapTest, Lookup) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v = Tensor(22);
tm.insert(k, v);
absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k);
Tensor f = map_it->second;
EXPECT_EQ(map_it->first, k);
test::ExpectTensorEqual<int32>(f, v);
}
TEST(TensorMapTest, Erase) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v = Tensor(22);
tm.insert(k, v);
tm.erase(k);
EXPECT_EQ(tm.find(k), tm.tensors().end());
}
TEST(TensorMapTest, SameKeyInsert) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v1 = Tensor(22);
Tensor v2 = Tensor(23);
bool b1 = tm.insert(k, v1);
bool b2 = tm.insert(k, v2);
EXPECT_EQ(b1, true);
EXPECT_EQ(b2, false);
absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k);
EXPECT_EQ(map_it->first, k);
test::ExpectTensorEqual<int32>(map_it->second, v1);
}
TEST(TensorMapTest, Replace) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v1 = Tensor(22);
Tensor v2 = Tensor(23);
tm[k] = v2;
absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k);
EXPECT_EQ(map_it->first, k);
test::ExpectTensorEqual<int32>(map_it->second, v2);
}
TEST(TensorMapTest, ListKeys) {
TensorMap tm;
TensorKey k = Tensor(11.0);
TensorKey k2 = Tensor(12.0);
Tensor v = Tensor(22);
Tensor v2 = Tensor(23);
tm.insert(k, v);
tm.insert(k2, v2);
std::vector<Tensor> keys = tm.keys();
std::vector<std::pair<double, int>> key_doubles;
for (int i = 0; i < keys.size(); i++) {
double x = keys[i].scalar<double>()();
std::pair<double, int> p = std::pair<double, int>(x, i);
key_doubles.push_back(p);
}
  std::sort(key_doubles.begin(), key_doubles.end());
EXPECT_EQ(keys.size(), 2);
EXPECT_EQ(key_doubles[0].first, 11.0);
EXPECT_EQ(key_doubles[1].first, 12.0);
int ind1 = key_doubles[0].second;
int ind2 = key_doubles[1].second;
EXPECT_EQ(keys[ind1].shape(), k.shape());
EXPECT_EQ(keys[ind2].shape(), k2.shape());
}
TEST(TensorMapTest, Size) {
TensorMap tm;
EXPECT_EQ(tm.size(), 0);
TensorKey k = Tensor(11);
Tensor v = Tensor(22);
tm.insert(k, v);
EXPECT_EQ(tm.size(), 1);
}
TEST(TensorMapTest, Copy) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v = Tensor(22);
tm.insert(k, v);
TensorMap tmc = tm.Copy();
EXPECT_EQ(tm.size(), tmc.size());
EXPECT_NE(tm.find(k), tm.tensors().end());
EXPECT_NE(tmc.find(k), tmc.tensors().end());
EXPECT_EQ(tm.find(k)->first, tmc.find(k)->first);
test::ExpectTensorEqual<int32>(tm.find(k)->second, tmc.find(k)->second);
}
TEST(TensorMapTest, EncodeDecode) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v = Tensor(22);
tm.insert(k, v);
VariantTensorData data;
tm.Encode(&data);
TensorMap tmc;
tmc.Decode(data);
EXPECT_EQ(tm.size(), tmc.size());
EXPECT_NE(tm.find(k), tm.tensors().end());
EXPECT_NE(tmc.find(k), tmc.tensors().end());
EXPECT_EQ(tm.find(k)->first, tmc.find(k)->first);
test::ExpectTensorEqual<int32>(tm.find(k)->second, tmc.find(k)->second);
}
}
} |
1,492 | cpp | tensorflow/tensorflow | bias_op | tensorflow/core/kernels/bias_op.cc | tensorflow/core/kernels/bias_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BIAS_OP_H_
#define TENSORFLOW_CORE_KERNELS_BIAS_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
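// Adds a bias vector to an input by broadcasting it over the remaining
// dimensions. The matrix overload is used by the kernel for the NCHW layout,
// where the channel dimension is not innermost.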
template <typename Device, typename T>
struct Bias {
void operator()(const Device& d, typename TTypes<T>::ConstFlat input,
typename TTypes<T>::ConstVec bias,
typename TTypes<T>::Flat output) {
const Eigen::Index rest_size = input.size() / bias.dimension(0);
Eigen::DSizes<Eigen::Index, 1> bcast(rest_size);
MaybeWith32BitIndexing<Device>(
[&](auto input32, auto bias32, auto output32, const auto& bcast32) {
output32.device(d) = input32 + bias32.broadcast(bcast32);
},
input, bias, output, bcast);
}
void operator()(const Device& d, typename TTypes<T>::ConstMatrix input,
typename TTypes<T>::ConstMatrix bias1,
typename TTypes<T>::Matrix output) {
const Eigen::Index rest_size = input.dimension(0) / bias1.dimension(0);
Eigen::DSizes<Eigen::Index, 2> bcast(rest_size, input.dimension(1));
MaybeWith32BitIndexing<Device>(
[&](auto input32, auto bias32, auto output32, const auto& bcast32) {
output32.device(d) = input32 + bias32.broadcast(bcast32);
},
input, bias1, output, bcast);
}
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/bias_op.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/redux_functor.h"
#include "tensorflow/core/profiler/lib/scoped_annotation.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/tensor_format.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "xla/stream_executor/gpu/gpu_stream.h"
#include "xla/stream_executor/gpu/gpu_timer.h"
#include "tensorflow/core/kernels/bias_op_gpu.h"
#include "tensorflow/core/platform/stream_executor.h"
#endif
#if GOOGLE_CUDA
#include "xla/stream_executor/cuda/cuda_stream.h"
#endif
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace {
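// Decomposes the value tensor's shape into batch/height/width/depth/channel
// for the given data format; dimensions that do not apply are left at 1.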
void GetBiasValueDims(const Tensor& value_tensor, TensorFormat data_format,
int32* batch, int32* height, int32* width, int32* depth,
int32* channel) {
*batch = 1;
*height = 1;
*width = 1;
*depth = 1;
*channel = 1;
if (data_format == FORMAT_NHWC) {
int32_t channel_dim = value_tensor.dims() - 1;
*channel = static_cast<int32>(value_tensor.dim_size(channel_dim));
for (int32_t i = 0; i < channel_dim; i++) {
*batch *= static_cast<int32>(value_tensor.dim_size(i));
}
} else if (data_format == FORMAT_NCHW) {
*batch = static_cast<int32>(value_tensor.dim_size(0));
*channel = static_cast<int32>(value_tensor.dim_size(1));
*height = static_cast<int32>(value_tensor.dim_size(2));
if (value_tensor.dims() > 3) {
*width = static_cast<int32>(value_tensor.dim_size(3));
}
if (value_tensor.dims() > 4) {
*depth = static_cast<int32>(value_tensor.dim_size(4));
}
}
}
template <class T>
struct AccumulatorType {
typedef T type;
};
template <>
struct AccumulatorType<Eigen::half> {
typedef float type;
};
}
template <typename Device, typename T>
class BiasOp : public BinaryOp<T> {
public:
explicit BiasOp(OpKernelConstruction* context) : BinaryOp<T>(context) {
string data_format;
if (context->GetAttr("data_format", &data_format).ok()) {
OP_REQUIRES(context, FormatFromString(data_format, &data_format_),
errors::InvalidArgument("Invalid data format"));
} else {
data_format_ = FORMAT_NHWC;
}
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& bias = context->input(1);
OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(input.shape()),
errors::InvalidArgument("Input tensor must be at least 2D: ",
input.shape()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(bias.shape()),
errors::InvalidArgument("Biases must be 1D: ", bias.shape()));
int channel_dim;
if (data_format_ == FORMAT_NCHW) {
channel_dim = 1;
} else {
channel_dim = input.shape().dims() - 1;
}
OP_REQUIRES(context,
bias.shape().dim_size(0) == input.shape().dim_size(channel_dim),
errors::InvalidArgument(
"Must provide as many biases as the last dimension "
"of the input tensor: ",
bias.shape(), " vs. ", input.shape()));
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{0}, 0, input.shape(), &output));
if (input.NumElements() == 0) return;
functor::Bias<Device, T> functor;
const Device& d = context->eigen_device<Device>();
if (data_format_ == FORMAT_NCHW && input.shape().dims() > 2) {
functor(d, input.flat_inner_outer_dims<T, 2>(1),
bias.flat_outer_dims<T, 2>(),
output->flat_inner_outer_dims<T, 2>(1));
} else {
functor(d, input.flat<T>(), bias.vec<T>(), output->flat<T>());
}
}
private:
TensorFormat data_format_;
};
#define REGISTER_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("BiasAdd").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
BiasOp<CPUDevice, type>); \
REGISTER_KERNEL_BUILDER( \
Name("BiasAddV1").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
BiasOp<CPUDevice, type>);
TF_CALL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
template <typename Device, typename T>
class BiasGradOp : public OpKernel {
public:
explicit BiasGradOp(OpKernelConstruction* context) : OpKernel(context) {
string data_format;
if (context->GetAttr("data_format", &data_format).ok()) {
OP_REQUIRES(context, FormatFromString(data_format, &data_format_),
errors::InvalidArgument("Invalid data format"));
} else {
data_format_ = FORMAT_NHWC;
}
}
void Compute(OpKernelContext* context) override {
const Tensor& output_backprop = context->input(0);
OP_REQUIRES(context,
TensorShapeUtils::IsMatrixOrHigher(output_backprop.shape()),
errors::InvalidArgument("Input tensor must be at least 2D: ",
output_backprop.shape()));
OP_REQUIRES(
context,
FastBoundsCheck(output_backprop.NumElements(),
std::numeric_limits<int32>::max()),
errors::InvalidArgument("BiasGrad requires tensor size <= int32 max"));
int channel_dim;
if (data_format_ == FORMAT_NCHW) {
channel_dim = 1;
} else {
channel_dim = output_backprop.shape().dims() - 1;
}
Tensor* output = nullptr;
TensorShape output_shape{output_backprop.shape().dim_size(channel_dim)};
OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
if (output_backprop.NumElements() == 0) {
output->template flat<T>().setZero();
} else {
using AccumT = typename AccumulatorType<T>::type;
if (data_format_ == FORMAT_NCHW) {
const functor::ReduceMiddleDimensions<
T, AccumT, T, Eigen::internal::scalar_sum_op<AccumT>,
Eigen::internal::SumReducer<T>>
redux;
auto flat_outer = output_backprop.flat_outer_dims<T, 3>();
redux(context->eigen_device<Device>(), flat_outer.dimensions(),
output_backprop, output, 1);
} else {
const functor::ReduceOuterDimensions<
T, AccumT, T, Eigen::internal::scalar_sum_op<AccumT>>
redux;
auto flat_inner = output_backprop.flat_inner_dims<T, 2>();
redux(context->eigen_device<Device>(), flat_inner.dimensions(),
output_backprop, output);
}
}
}
private:
TensorFormat data_format_;
};
#define REGISTER_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("BiasAddGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
BiasGradOp<CPUDevice, type>);
TF_CALL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
template <typename T>
class BiasOp<GPUDevice, T> : public BinaryOp<T> {
public:
typedef GPUDevice Device;
explicit BiasOp(OpKernelConstruction* context) : BinaryOp<T>(context) {
string data_format;
if (context->GetAttr("data_format", &data_format).ok()) {
OP_REQUIRES(context, FormatFromString(data_format, &data_format_),
errors::InvalidArgument("Invalid data format"));
} else {
data_format_ = FORMAT_NHWC;
}
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& bias = context->input(1);
OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(input.shape()),
errors::InvalidArgument("Input tensor must be at least 2D: ",
input.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(bias.shape()),
errors::InvalidArgument("Biases must be 1D: ",
bias.shape().DebugString()));
int32_t batch, height, width, depth, channel;
GetBiasValueDims(input, data_format_, &batch, &height, &width, &depth,
&channel);
OP_REQUIRES(context, bias.shape().dim_size(0) == channel,
errors::InvalidArgument(
"Must provide as many biases as the channel dimension "
"of the input tensor: ",
bias.shape().DebugString(), " vs. ", channel, " in ",
input.shape().DebugString()));
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{0}, 0, input.shape(), &output));
if (input.NumElements() > 0) {
BiasGPU<T>::compute(context->template eigen_device<Device>(),
input.flat<T>().data(), bias.flat<T>().data(),
output->flat<T>().data(), batch, width, height, depth,
channel, data_format_);
}
}
private:
TensorFormat data_format_;
};
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("BiasAdd").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
BiasOp<GPUDevice, type>); \
REGISTER_KERNEL_BUILDER( \
Name("BiasAddV1").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
BiasOp<GPUDevice, type>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_KERNEL);
REGISTER_GPU_KERNEL(int32);
#undef REGISTER_GPU_KERNEL
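// Autotuning for BiasAddGrad on GPU: the first time a given (shape, format,
// dtype, device) combination is seen, both the native CUDA kernel and the
// cub-based reduction are timed and the faster choice is cached.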
struct BiasGradAutotuneGroup {
static string name() { return "BiasGrad"; }
};
class BiasAddGradGPUConfig {
public:
BiasAddGradGPUConfig() : mode_(BiasAddGradGPUMode::kReduction) {}
string ToString() const {
if (mode_ == BiasAddGradGPUMode::kNative) {
return "native CUDA kernel.";
}
if (mode_ == BiasAddGradGPUMode::kReduction) {
return "cub reduction kernel.";
}
return "unknown kernel.";
}
BiasAddGradGPUMode get_mode() const { return mode_; }
void set_mode(BiasAddGradGPUMode val) { mode_ = val; }
bool operator==(const BiasAddGradGPUConfig& other) const {
return this->mode_ == other.get_mode();
}
bool operator!=(const BiasAddGradGPUConfig& other) const {
return !(*this == other);
}
private:
BiasAddGradGPUMode mode_;
};
class BiasAddParams {
public:
using SpatialArray = gtl::InlinedVector<int64_t, 4>;
BiasAddParams(const SpatialArray& in_shape, TensorFormat data_format,
DataType dtype, int device_id)
: in_shape_(in_shape),
data_format_(data_format),
dtype_(dtype),
device_id_(device_id) {
for (int64_t val : in_shape_) {
hash_code_ = Hash64Combine(hash_code_, val);
}
hash_code_ = Hash64Combine(hash_code_, data_format);
hash_code_ = Hash64Combine(hash_code_, dtype);
hash_code_ = Hash64Combine(hash_code_, device_id);
}
bool operator==(const BiasAddParams& other) const {
return this->get_data_as_tuple() == other.get_data_as_tuple();
}
bool operator!=(const BiasAddParams& other) const {
return !(*this == other);
}
uint64 hash() const { return hash_code_; }
string ToString() const {
return strings::StrCat(
"(", absl::StrJoin(in_shape_, ", "), "), ",
data_format_, ", ", dtype_, ", ", device_id_);
}
protected:
using ParamsDataType = std::tuple<SpatialArray, TensorFormat, DataType, int>;
ParamsDataType get_data_as_tuple() const {
return std::make_tuple(in_shape_, data_format_, dtype_, device_id_);
}
uint64 hash_code_ = 0;
private:
SpatialArray in_shape_;
TensorFormat data_format_;
DataType dtype_;
int device_id_;
};
typedef AutotuneSingleton<BiasGradAutotuneGroup, BiasAddParams,
BiasAddGradGPUConfig>
AutotuneBiasGrad;
template <typename T>
class BiasGradOp<GPUDevice, T> : public OpKernel {
public:
typedef GPUDevice Device;
explicit BiasGradOp(OpKernelConstruction* context) : OpKernel(context) {
string data_format;
if (context->GetAttr("data_format", &data_format).ok()) {
OP_REQUIRES(context, FormatFromString(data_format, &data_format_),
errors::InvalidArgument("Invalid data format"));
} else {
data_format_ = FORMAT_NCHW;
}
}
void ComputeWithCustomKernel(OpKernelContext* context,
const Tensor& output_backprop, int32_t batch,
int32_t width, int32_t height, int32_t depth,
int32_t channel, Tensor* output) {
BiasGradGPU<T>::compute(context->template eigen_device<Device>(),
output_backprop.template flat<T>().data(),
output->flat<T>().data(), batch, width, height,
depth, channel, data_format_);
}
void ComputeWithReduceSum(OpKernelContext* context,
const Tensor& output_backprop, int32_t batch,
int32_t width, int32_t height, int32_t depth,
int32_t channel, Tensor* output) {
if (data_format_ == FORMAT_NCHW) {
int32_t row_count = batch * channel;
int32_t col_count = height * width * depth;
Tensor temp_grad_outputs;
TensorShape temp_grad_output_shape{row_count, col_count};
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value,
temp_grad_output_shape,
&temp_grad_outputs));
BiasGradGPU<T>::DoRowReduction(
context, temp_grad_outputs.flat<T>().data(),
output_backprop.template flat<T>().data(), row_count, col_count);
row_count = batch;
col_count = channel;
BiasGradGPU<T>::DoColReduction(context, output->flat<T>().data(),
temp_grad_outputs.flat<T>().data(),
row_count, col_count);
} else {
int32_t row_count = batch * height * width * depth;
int32_t col_count = channel;
BiasGradGPU<T>::DoColReduction(
context, const_cast<T*>(output->flat<T>().data()),
reinterpret_cast<const T*>(output_backprop.template flat<T>().data()),
row_count, col_count);
}
}
void Compute(OpKernelContext* context) override {
const Tensor& output_backprop = context->input(0);
OP_REQUIRES(context,
TensorShapeUtils::IsMatrixOrHigher(output_backprop.shape()),
errors::InvalidArgument("Input tensor must be at least 2D: ",
output_backprop.shape().DebugString()));
int32_t batch, height, width, depth, channel;
GetBiasValueDims(output_backprop, data_format_, &batch, &height, &width,
&depth, &channel);
Tensor* output = nullptr;
TensorShape output_shape{channel};
OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
if (channel == 0) return;
auto* stream = context->op_device_context()->stream();
OP_REQUIRES(context, stream, errors::Internal("No GPU stream available."));
se::DeviceMemoryBase output_ptr(output->flat<T>().data(),
output->NumElements() * sizeof(T));
OP_REQUIRES_OK(context, stream->MemZero(&output_ptr,
output->NumElements() * sizeof(T)));
if (output_backprop.NumElements() <= 0) return;
if (OpDeterminismRequired()) {
ComputeWithReduceSum(context, output_backprop, batch, width, height,
depth, channel, output);
return;
}
int device_id = stream->parent()->device_ordinal();
DataType dtype = output_backprop.dtype();
BiasAddParams bias_parameters = {
{batch, height * width * depth, channel},
data_format_,
dtype,
device_id,
};
BiasAddGradGPUConfig algo_config;
if (!AutotuneBiasGrad::GetInstance()->Find(bias_parameters, &algo_config)) {
profiler::ScopedAnnotation trace("bias_grad_autotuning");
BiasGradGPUProfileResult best_result;
StatusOr<se::gpu::GpuTimer> timer =
se::gpu::GpuTimer::Create(se::gpu::AsGpuStream(stream));
OP_REQUIRES_OK(context, timer.status());
ComputeWithCustomKernel(context, output_backprop, batch, width, height,
depth, channel, output);
StatusOr<absl::Duration> bias_duration = timer->GetElapsedDuration();
OP_REQUIRES_OK(context, bias_duration.status());
int64_t elapsed_microseconds = absl::ToInt64Microseconds(*bias_duration);
VLOG(1) << "BiasAddGrad " << bias_parameters.ToString()
<< " Native algo latency: " << elapsed_microseconds << "us";
if (elapsed_microseconds < best_result.elapsed_time()) {
best_result.set_algorithm(BiasAddGradGPUMode::kNative);
best_result.set_elapsed_time(elapsed_microseconds);
}
StatusOr<se::gpu::GpuTimer> reduction_timer =
se::gpu::GpuTimer::Create(se::gpu::AsGpuStream(stream));
OP_REQUIRES_OK(context, reduction_timer.status());
ComputeWithReduceSum(context, output_backprop, batch, width, height,
depth, channel, output);
StatusOr<absl::Duration> reduction_duration =
reduction_timer->GetElapsedDuration();
OP_REQUIRES_OK(context, reduction_duration.status());
elapsed_microseconds += absl::ToInt64Microseconds(*reduction_duration);
VLOG(1) << "BiasAddGrad " << bias_parameters.ToString()
<< " Reduction algo latency: " << elapsed_microseconds;
if (elapsed_microseconds < best_result.elapsed_time()) {
best_result.set_algorithm(BiasAddGradGPUMode::kReduction);
best_result.set_elapsed_time(elapsed_microseconds);
}
algo_config.set_mode(best_result.algorithm());
AutotuneBiasGrad::GetInstance()->Insert(bias_parameters, algo_config);
return;
}
if (algo_config.get_mode() == BiasAddGradGPUMode::kReduction) {
ComputeWithReduceSum(context, output_backprop, batch, width, height,
depth, channel, output);
} else {
ComputeWithCustomKernel(context, output_backprop, batch, width, height,
depth, channel, output);
}
}
private:
TensorFormat data_format_;
};
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("BiasAddGrad").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
BiasGradOp<GPUDevice, type>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
#endif
} | #include "tensorflow/core/kernels/bias_op.h"
#include <random>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
static Graph* BiasAdd(int d0, int d1, int d2, int d3) {
auto* g = new Graph(OpRegistry::Global());
Tensor input(DT_FLOAT, TensorShape({d0, d1, d2, d3}));
Tensor bias(DT_FLOAT, TensorShape({d3}));
input.flat<float>().setRandom();
bias.flat<float>().setRandom();
test::graph::Binary(g, "BiasAdd", test::graph::Constant(g, input),
test::graph::Constant(g, bias));
return g;
}
static Graph* BiasAddGrad(int d0, int d1, int d2, int d3) {
auto* g = new Graph(OpRegistry::Global());
Tensor out_backprop(DT_FLOAT, TensorShape({d0, d1, d2, d3}));
out_backprop.flat<float>().setRandom();
test::graph::Unary(g, "BiasAddGrad", test::graph::Constant(g, out_backprop));
return g;
}
#define BM_BiasAddNHWC(N, W, H, C, DEVICE) \
static void BM_BiasAddNHWC##_##N##_##H##_##W##_##C##_##DEVICE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, BiasAdd(N, H, W, C), false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * N * H * \
W * C); \
} \
BENCHMARK(BM_BiasAddNHWC##_##N##_##H##_##W##_##C##_##DEVICE)->UseRealTime();
#define BM_BiasAddGradNHWC(N, W, H, C, DEVICE) \
static void BM_BiasAddGradNHWC##_##N##_##H##_##W##_##C##_##DEVICE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, BiasAddGrad(N, H, W, C), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * N * H * \
W * C); \
} \
BENCHMARK(BM_BiasAddGradNHWC##_##N##_##H##_##W##_##C##_##DEVICE) \
->UseRealTime();
BM_BiasAddNHWC(32, 32, 32, 128, cpu);
BM_BiasAddNHWC(32, 32, 32, 256, cpu);
BM_BiasAddNHWC(32, 32, 32, 512, cpu);
BM_BiasAddNHWC(32, 32, 32, 1024, cpu);
BM_BiasAddNHWC(32, 64, 64, 128, cpu);
BM_BiasAddNHWC(32, 64, 64, 256, cpu);
BM_BiasAddNHWC(32, 64, 64, 512, cpu);
BM_BiasAddNHWC(32, 64, 64, 1024, cpu);
BM_BiasAddGradNHWC(32, 32, 32, 128, cpu);
BM_BiasAddGradNHWC(32, 32, 32, 256, cpu);
BM_BiasAddGradNHWC(32, 32, 32, 512, cpu);
BM_BiasAddGradNHWC(32, 32, 32, 1024, cpu);
BM_BiasAddGradNHWC(32, 64, 64, 128, cpu);
BM_BiasAddGradNHWC(32, 64, 64, 256, cpu);
BM_BiasAddGradNHWC(32, 64, 64, 512, cpu);
BM_BiasAddGradNHWC(32, 64, 64, 1024, cpu);
#ifdef GOOGLE_CUDA
BM_BiasAddGradNHWC(32, 32, 32, 128, gpu);
BM_BiasAddGradNHWC(32, 32, 32, 256, gpu);
BM_BiasAddGradNHWC(32, 32, 32, 512, gpu);
BM_BiasAddGradNHWC(32, 32, 32, 1024, gpu);
BM_BiasAddGradNHWC(32, 64, 64, 128, gpu);
BM_BiasAddGradNHWC(32, 64, 64, 256, gpu);
BM_BiasAddGradNHWC(32, 64, 64, 512, gpu);
BM_BiasAddGradNHWC(32, 64, 64, 1024, gpu);
#endif
} |
1,493 | cpp | tensorflow/tensorflow | random_binomial_op | tensorflow/core/kernels/random_binomial_op.cc | tensorflow/core/kernels/random_binomial_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_RANDOM_BINOMIAL_OP_H_
#define TENSORFLOW_CORE_KERNELS_RANDOM_BINOMIAL_OP_H_
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/lib/random/random_distributions.h"
namespace tensorflow {
class OpKernelContext;
namespace functor {
template <typename Device, typename T, typename U>
struct RandomBinomialFunctor {
void operator()(OpKernelContext* ctx, const Device& d, int64_t num_batches,
int64_t samples_per_batch, int64_t num_elements,
typename TTypes<T>::ConstFlat counts,
typename TTypes<T>::ConstFlat probs,
const random::PhiloxRandom& gen,
typename TTypes<U>::Flat output);
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/random_binomial_op.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/rng_alg.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/kernels/random_ops_util.h"
#include "tensorflow/core/kernels/stateful_random_ops_cpu_gpu.h"
#include "tensorflow/core/kernels/stateless_random_ops.h"
#include "tensorflow/core/kernels/training_op_helpers.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/bcast.h"
#include "tensorflow/core/util/guarded_philox_random.h"
#include "tensorflow/core/util/work_sharder.h"
#define UNIFORM(X) \
if (uniform_remaining == 0) { \
uniform_remaining = Uniform::kResultElementCount; \
uniform_result = uniform(gen); \
} \
uniform_remaining--; \
double X = uniform_result[uniform_remaining]
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace {
typedef random::UniformDistribution<random::PhiloxRandom, double> Uniform;
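// Inversion sampling: draw geometric(prob) gaps and count how many fit before
// their running sum exceeds `count`. The functor below uses this path when
// count * prob (or count * (1 - prob)) is small.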
double binomial_inversion(double count, double prob,
random::PhiloxRandom* gen) {
using Eigen::numext::ceil;
using Eigen::numext::log;
using Eigen::numext::log1p;
double geom_sum = 0;
int num_geom = 0;
Uniform uniform;
typename Uniform::ResultType uniform_result;
int16_t uniform_remaining = 0;
while (true) {
UNIFORM(u);
double geom = ceil(log(u) / log1p(-prob));
geom_sum += geom;
if (geom_sum > count) {
break;
}
++num_geom;
}
return num_geom;
}
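// Tail term of the Stirling series for log(k!): precomputed values for
// k <= 9, a truncated series expansion otherwise.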
inline double stirling_approx_tail(double k) {
static double kTailValues[] = {0.0810614667953272, 0.0413406959554092,
0.0276779256849983, 0.02079067210376509,
0.0166446911898211, 0.0138761288230707,
0.0118967099458917, 0.0104112652619720,
0.00925546218271273, 0.00833056343336287};
if (k <= 9) {
return kTailValues[static_cast<int>(k)];
}
double kp1sq = (k + 1) * (k + 1);
return (1.0 / 12 - (1.0 / 360 - 1.0 / 1260 / kp1sq) / kp1sq) / (k + 1);
}
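// BTRS ("transformed rejection with squeeze") binomial sampler, following
// Hormann (1993). The functor below uses it when count * prob (or
// count * (1 - prob)) is at least 10.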
inline double btrs(double count, double prob, random::PhiloxRandom* gen) {
using Eigen::numext::abs;
using Eigen::numext::floor;
using Eigen::numext::log;
using Eigen::numext::log1p;
using Eigen::numext::sqrt;
const double stddev = sqrt(count * prob * (1 - prob));
const double b = 1.15 + 2.53 * stddev;
const double a = -0.0873 + 0.0248 * b + 0.01 * prob;
const double c = count * prob + 0.5;
const double v_r = 0.92 - 4.2 / b;
const double r = prob / (1 - prob);
const double alpha = (2.83 + 5.1 / b) * stddev;
const double m = floor((count + 1) * prob);
Uniform uniform;
typename Uniform::ResultType uniform_result;
int16_t uniform_remaining = 0;
while (true) {
UNIFORM(u);
UNIFORM(v);
u = u - 0.5;
double us = 0.5 - abs(u);
double k = floor((2 * a / us + b) * u + c);
if (us >= 0.07 && v <= v_r) {
return k;
}
if (k < 0 || k > count) {
continue;
}
v = log(v * alpha / (a / (us * us) + b));
double upperbound =
((m + 0.5) * log((m + 1) / (r * (count - m + 1))) +
(count + 1) * log((count - m + 1) / (count - k + 1)) +
(k + 0.5) * log(r * (count - k + 1) / (k + 1)) +
stirling_approx_tail(m) + stirling_approx_tail(count - m) -
stirling_approx_tail(k) - stirling_approx_tail(count - k));
if (v <= upperbound) {
return k;
}
}
}
}
namespace functor {
template <typename T, typename U>
struct RandomBinomialFunctor<CPUDevice, T, U> {
void operator()(OpKernelContext* ctx, const CPUDevice& d, int64_t num_batches,
int64_t samples_per_batch, int64_t num_elements,
const BCast& bcast, typename TTypes<T>::ConstFlat counts,
typename TTypes<T>::ConstFlat probs,
const random::PhiloxRandom& gen,
typename TTypes<U>::Flat output) {
auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
auto DoWork = [num_batches, samples_per_batch, &bcast, &counts, &probs,
&gen, &output](int64_t start_output, int64_t limit_output) {
const bool should_bcast = bcast.IsBroadcastingRequired();
const auto& counts_batch_indices = bcast.x_batch_indices();
const auto& probs_batch_indices = bcast.y_batch_indices();
auto output_flat = output.data();
for (int64_t output_idx = start_output; output_idx < limit_output;
) {
int64_t batch_idx = output_idx / samples_per_batch;
U* const output_batch_offset = output_flat + batch_idx;
T count, prob;
if (should_bcast) {
count = counts(counts_batch_indices[batch_idx]);
prob = probs(probs_batch_indices[batch_idx]);
} else {
count = counts(batch_idx);
prob = probs(batch_idx);
}
double dcount = static_cast<double>(count);
if (dcount <= 0.0 || prob <= T(0.0)) {
for (int64_t sample_idx = output_idx % samples_per_batch;
sample_idx < samples_per_batch && output_idx < limit_output;
++sample_idx, ++output_idx) {
output_batch_offset[sample_idx * num_batches] = static_cast<U>(0.0);
}
} else if (prob >= T(1.0)) {
for (int64_t sample_idx = output_idx % samples_per_batch;
sample_idx < samples_per_batch && output_idx < limit_output;
++sample_idx, ++output_idx) {
output_batch_offset[sample_idx * num_batches] =
static_cast<U>(dcount);
}
} else if (prob <= T(0.5)) {
double dp = static_cast<double>(prob);
if (count * prob >= T(10)) {
for (int64_t sample_idx = output_idx % samples_per_batch;
sample_idx < samples_per_batch && output_idx < limit_output;
++sample_idx, ++output_idx) {
random::PhiloxRandom gen_copy = gen;
gen_copy.Skip(256 * output_idx);
output_batch_offset[sample_idx * num_batches] =
static_cast<U>(btrs(dcount, dp, &gen_copy));
}
} else {
for (int64_t sample_idx = output_idx % samples_per_batch;
sample_idx < samples_per_batch && output_idx < limit_output;
++sample_idx, ++output_idx) {
random::PhiloxRandom gen_copy = gen;
gen_copy.Skip(42 * output_idx);
output_batch_offset[sample_idx * num_batches] =
static_cast<U>(binomial_inversion(dcount, dp, &gen_copy));
}
}
} else if (prob > T(0.5)) {
T q = T(1) - prob;
double dq = static_cast<double>(q);
if (count * q >= T(10)) {
for (int64_t sample_idx = output_idx % samples_per_batch;
sample_idx < samples_per_batch && output_idx < limit_output;
++sample_idx, ++output_idx) {
random::PhiloxRandom gen_copy = gen;
gen_copy.Skip(256 * output_idx);
output_batch_offset[sample_idx * num_batches] =
static_cast<U>(dcount - btrs(dcount, dq, &gen_copy));
}
} else {
for (int64_t sample_idx = output_idx % samples_per_batch;
sample_idx < samples_per_batch && output_idx < limit_output;
++sample_idx, ++output_idx) {
random::PhiloxRandom gen_copy = gen;
gen_copy.Skip(42 * output_idx);
output_batch_offset[sample_idx * num_batches] = static_cast<U>(
dcount - binomial_inversion(dcount, dq, &gen_copy));
}
}
} else {
for (int64_t sample_idx = output_idx % samples_per_batch;
sample_idx < samples_per_batch && output_idx < limit_output;
++sample_idx, ++output_idx) {
output_batch_offset[sample_idx * num_batches] = static_cast<U>(NAN);
}
}
}
};
static const int kElementCost = 329 + 6 * Uniform::kElementCost +
6 * random::PhiloxRandom::kElementCost;
Shard(worker_threads.num_threads, worker_threads.workers, num_elements,
kElementCost, DoWork);
}
};
}
namespace {
template <typename Device, typename T, typename U>
class RandomBinomialOp : public OpKernel {
static constexpr int32_t kDesiredBatchSize = 100;
public:
explicit RandomBinomialOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& alg_tensor = ctx->input(1);
const Tensor& shape_tensor = ctx->input(2);
const Tensor& counts_tensor = ctx->input(3);
const Tensor& probs_tensor = ctx->input(4);
tensorflow::BCast bcast(counts_tensor.shape().dim_sizes(),
probs_tensor.shape().dim_sizes(),
false,
true);
OP_REQUIRES(ctx, bcast.IsValid(),
errors::InvalidArgument(
"counts and probs must have compatible batch dimensions: ",
counts_tensor.shape().DebugString(), " vs. ",
probs_tensor.shape().DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(shape_tensor.shape()),
errors::InvalidArgument("Input shape should be a vector, got shape: ",
shape_tensor.shape().DebugString()));
OP_REQUIRES(ctx,
(shape_tensor.dtype() == DataType::DT_INT32 ||
shape_tensor.dtype() == DataType::DT_INT64),
errors::InvalidArgument(
"Input shape should have dtype {int32, int64}."));
TensorShape bcast_shape = BCast::ToShape(bcast.output_shape());
TensorShape output_shape;
if (shape_tensor.dtype() == DataType::DT_INT32) {
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(shape_tensor.vec<int32>(),
&output_shape));
} else {
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(
shape_tensor.vec<int64_t>(), &output_shape));
}
OP_REQUIRES(ctx, TensorShapeUtils::EndsWith(output_shape, bcast_shape),
errors::InvalidArgument(
"Shape passed in must end with broadcasted shape."));
OP_REQUIRES(ctx, alg_tensor.dims() == 0,
errors::InvalidArgument("algorithm must be of shape [], not ",
alg_tensor.shape().DebugString()));
Algorithm alg = Algorithm(alg_tensor.flat<int64_t>()(0));
int64_t samples_per_batch = 1;
const int64_t num_sample_dims =
(shape_tensor.dim_size(0) - bcast.output_shape().size());
for (int64_t i = 0; i < num_sample_dims; ++i) {
samples_per_batch *= shape_tensor.flat<int32>()(i);
}
int64_t num_batches = 1;
for (int64_t i = num_sample_dims; i < shape_tensor.dim_size(0); ++i) {
num_batches *= shape_tensor.flat<int32>()(i);
}
const int64_t num_elements = num_batches * samples_per_batch;
Tensor* samples_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &samples_tensor));
core::RefCountPtr<Var> var;
OP_REQUIRES_OK(ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &var));
Tensor* var_tensor = var->tensor();
OP_REQUIRES(
ctx, var_tensor->dtype() == STATE_ELEMENT_DTYPE,
errors::InvalidArgument("dtype of RNG state variable must be ",
DataTypeString(STATE_ELEMENT_DTYPE), ", not ",
DataTypeString(var_tensor->dtype())));
OP_REQUIRES(ctx, var_tensor->dims() == 1,
errors::InvalidArgument(
"RNG state must have one and only one dimension, not ",
var_tensor->dims()));
auto var_tensor_flat = var_tensor->flat<StateElementType>();
OP_REQUIRES(ctx, alg == RNG_ALG_PHILOX,
errors::InvalidArgument("Unsupported algorithm id: ", alg));
static_assert(std::is_same<StateElementType, int64_t>::value,
"StateElementType must be int64");
static_assert(std::is_same<PhiloxRandom::ResultElementType, uint32>::value,
"PhiloxRandom::ResultElementType must be uint32");
OP_REQUIRES(ctx, var_tensor_flat.size() >= PHILOX_MIN_STATE_SIZE,
errors::InvalidArgument(
"For Philox algorithm, the size of state must be at least ",
PHILOX_MIN_STATE_SIZE, "; got ", var_tensor_flat.size()));
OP_REQUIRES_OK(ctx, PrepareToUpdateVariable<Device, StateElementType>(
ctx, var_tensor, var->copy_on_read_mode.load()));
auto var_data = var_tensor_flat.data();
auto philox = GetPhiloxRandomFromMem(var_data);
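    // Advance the stored RNG state past a conservative upper bound on the
    // number of Philox outputs this op may consume, so later kernels do not
    // reuse the same stream.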
UpdateMemWithPhiloxRandom(
philox, num_batches * 2 * 100 * (samples_per_batch + 3) / 4, var_data);
auto binomial_functor = functor::RandomBinomialFunctor<Device, T, U>();
binomial_functor(ctx, ctx->eigen_device<Device>(), num_batches,
samples_per_batch, num_elements, bcast,
counts_tensor.flat<T>(), probs_tensor.flat<T>(), philox,
samples_tensor->flat<U>());
}
private:
RandomBinomialOp(const RandomBinomialOp&) = delete;
void operator=(const RandomBinomialOp&) = delete;
};
template <typename Device, typename T, typename U>
class StatelessRandomBinomialOp : public OpKernel {
static constexpr int32_t kDesiredBatchSize = 100;
public:
explicit StatelessRandomBinomialOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& shape_tensor = ctx->input(0);
const Tensor& seed_tensor = ctx->input(1);
const Tensor& counts_tensor = ctx->input(2);
const Tensor& probs_tensor = ctx->input(3);
OP_REQUIRES(ctx, seed_tensor.dims() == 1 && seed_tensor.dim_size(0) == 2,
errors::InvalidArgument("seed must have shape [2], not ",
seed_tensor.shape().DebugString()));
tensorflow::BCast bcast(counts_tensor.shape().dim_sizes(),
probs_tensor.shape().dim_sizes(),
false,
true);
OP_REQUIRES(ctx, bcast.IsValid(),
errors::InvalidArgument(
"counts and probs must have compatible batch dimensions: ",
counts_tensor.shape().DebugString(), " vs. ",
probs_tensor.shape().DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(shape_tensor.shape()),
errors::InvalidArgument("Input shape should be a vector, got shape: ",
shape_tensor.shape().DebugString()));
OP_REQUIRES(ctx,
(shape_tensor.dtype() == DataType::DT_INT32 ||
shape_tensor.dtype() == DataType::DT_INT64),
errors::InvalidArgument(
"Input shape should have dtype {int32, int64}."));
TensorShape bcast_shape = BCast::ToShape(bcast.output_shape());
TensorShape output_shape;
if (shape_tensor.dtype() == DataType::DT_INT32) {
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(shape_tensor.vec<int32>(),
&output_shape));
} else {
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(
shape_tensor.vec<int64_t>(), &output_shape));
}
OP_REQUIRES(ctx, TensorShapeUtils::EndsWith(output_shape, bcast_shape),
errors::InvalidArgument(
"Shape passed in must end with broadcasted shape."));
int64_t samples_per_batch = 1;
const int64_t num_sample_dims =
(shape_tensor.dim_size(0) - bcast.output_shape().size());
for (int64_t i = 0; i < num_sample_dims; ++i) {
      samples_per_batch *= shape_tensor.dtype() == DataType::DT_INT32
                               ? shape_tensor.flat<int32>()(i)
                               : shape_tensor.flat<int64_t>()(i);
}
int64_t num_batches = 1;
for (int64_t i = num_sample_dims; i < shape_tensor.dim_size(0); ++i) {
      num_batches *= shape_tensor.dtype() == DataType::DT_INT32
                         ? shape_tensor.flat<int32>()(i)
                         : shape_tensor.flat<int64_t>()(i);
}
const int64_t num_elements = num_batches * samples_per_batch;
Tensor* samples_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &samples_tensor));
if (output_shape.num_elements() == 0) return;
random::PhiloxRandom::Key key;
random::PhiloxRandom::ResultType counter;
OP_REQUIRES_OK(ctx, GenerateKey(seed_tensor, &key, &counter));
auto philox = random::PhiloxRandom(counter, key);
auto binomial_functor = functor::RandomBinomialFunctor<Device, T, U>();
binomial_functor(ctx, ctx->eigen_device<Device>(), num_batches,
samples_per_batch, num_elements, bcast,
counts_tensor.flat<T>(), probs_tensor.flat<T>(), philox,
samples_tensor->flat<U>());
}
private:
StatelessRandomBinomialOp(const StatelessRandomBinomialOp&) = delete;
void operator=(const StatelessRandomBinomialOp&) = delete;
};
}
#define REGISTER(RTYPE, TYPE) \
REGISTER_KERNEL_BUILDER(Name("StatefulRandomBinomial") \
.Device(DEVICE_CPU) \
.HostMemory("resource") \
.HostMemory("algorithm") \
.HostMemory("shape") \
.HostMemory("counts") \
.HostMemory("probs") \
.TypeConstraint<RTYPE>("dtype") \
.TypeConstraint<TYPE>("T"), \
RandomBinomialOp<CPUDevice, TYPE, RTYPE>); \
REGISTER_KERNEL_BUILDER(Name("StatelessRandomBinomial") \
.Device(DEVICE_CPU) \
.HostMemory("shape") \
.HostMemory("seed") \
.HostMemory("counts") \
.HostMemory("probs") \
.TypeConstraint<RTYPE>("dtype") \
.TypeConstraint<TYPE>("T"), \
StatelessRandomBinomialOp<CPUDevice, TYPE, RTYPE>)
#define REGISTER_ALL(RTYPE) \
REGISTER(RTYPE, Eigen::half); \
REGISTER(RTYPE, float); \
REGISTER(RTYPE, double);
REGISTER_ALL(Eigen::half);
REGISTER_ALL(float);
REGISTER_ALL(double);
REGISTER_ALL(int32);
REGISTER_ALL(int64_t);
#undef REGISTER
#undef REGISTER_ALL
} | #include <functional>
#include <memory>
#include <vector>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
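// Benchmark graphs covering the inversion (small count * prob) and BTRS
// rejection (large count * prob) sampling paths, plus their prob > 0.5
// complements.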
static Graph* RandomBinomialGraph(double count, double prob, int num_batches,
int samples_per_batch) {
Graph* g = new Graph(OpRegistry::Global());
Tensor shape_t(DT_INT32, TensorShape({2}));
shape_t.flat<int32>().setValues({num_batches, samples_per_batch});
Tensor counts_t(DT_FLOAT, TensorShape({num_batches}));
counts_t.flat<float>().setConstant(count);
Tensor probs_t(DT_FLOAT, TensorShape({num_batches}));
probs_t.flat<float>().setConstant(prob);
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("randombinomial"), "RandomBinomial")
.Input(test::graph::Constant(g, shape_t))
.Input(test::graph::Constant(g, counts_t))
.Input(test::graph::Constant(g, probs_t))
.Attr("dtype", DT_FLOAT)
.Finalize(g, &ret));
return g;
}
static Graph* RandomBinomialInv(int num_batches, int samples_per_batch) {
return RandomBinomialGraph(10., 0.3, num_batches, samples_per_batch);
}
static Graph* RandomBinomialRej(int num_batches, int samples_per_batch) {
return RandomBinomialGraph(100., 0.3, num_batches, samples_per_batch);
}
static Graph* RandomBinomialInvComplement(int num_batches,
int samples_per_batch) {
return RandomBinomialGraph(10., 0.8, num_batches, samples_per_batch);
}
static Graph* RandomBinomialRejComplement(int num_batches,
int samples_per_batch) {
return RandomBinomialGraph(100., 0.2, num_batches, samples_per_batch);
}
#define BM_RandomBinomialInv(DEVICE, B, S) \
static void BM_RandomBinomialInv_##DEVICE##_##B##_##S( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, RandomBinomialInv(B, S), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \
} \
BENCHMARK(BM_RandomBinomialInv_##DEVICE##_##B##_##S);
#define BM_RandomBinomialRej(DEVICE, B, S) \
static void BM_RandomBinomialRej_##DEVICE##_##B##_##S( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, RandomBinomialRej(B, S), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \
} \
BENCHMARK(BM_RandomBinomialRej_##DEVICE##_##B##_##S);
#define BM_RandomBinomialInvComplement(DEVICE, B, S) \
static void BM_RandomBinomialInvComplement_##DEVICE##_##B##_##S( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, RandomBinomialInvComplement(B, S), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \
} \
BENCHMARK(BM_RandomBinomialInvComplement_##DEVICE##_##B##_##S);
#define BM_RandomBinomialRejComplement(DEVICE, B, S) \
static void BM_RandomBinomialRejComplement_##DEVICE##_##B##_##S( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, RandomBinomialRejComplement(B, S), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \
} \
BENCHMARK(BM_RandomBinomialRejComplement_##DEVICE##_##B##_##S);
BM_RandomBinomialInv(cpu, 1000, 1000);
BM_RandomBinomialRej(cpu, 1000, 1000);
BM_RandomBinomialInvComplement(cpu, 1000, 1000);
BM_RandomBinomialRejComplement(cpu, 1000, 1000);
BM_RandomBinomialInv(gpu, 1000, 1000);
BM_RandomBinomialRej(gpu, 1000, 1000);
BM_RandomBinomialInvComplement(gpu, 1000, 1000);
BM_RandomBinomialRejComplement(gpu, 1000, 1000);
} |
1,494 | cpp | tensorflow/tensorflow | immutable_constant_op | tensorflow/core/kernels/immutable_constant_op.cc | tensorflow/core/kernels/immutable_constant_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IMMUTABLE_CONSTANT_OP_H_
#define TENSORFLOW_CORE_KERNELS_IMMUTABLE_CONSTANT_OP_H_
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
class ImmutableConstantOp : public OpKernel {
public:
explicit ImmutableConstantOp(OpKernelConstruction* context);
void Compute(OpKernelContext* ctx) override;
bool IsExpensive() override { return false; }
~ImmutableConstantOp() override;
static constexpr char const* kDTypeAttr = "dtype";
static constexpr char const* kShapeAttr = "shape";
static constexpr char const* kMemoryRegionNameAttr = "memory_region_name";
private:
string region_name_;
DataType dtype_;
TensorShape shape_;
ImmutableConstantOp(const ImmutableConstantOp&) = delete;
void operator=(const ImmutableConstantOp&) = delete;
};
}
#endif
#include "tensorflow/core/kernels/immutable_constant_op.h"
#include <unordered_set>
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace {
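// Allocator backed by a read-only memory-mapped file region: AllocateRaw hands
// out the region's base pointer instead of allocating fresh memory, so the
// resulting tensor aliases the mapped file contents.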
class MemmappedTensorAllocator : public Allocator {
public:
MemmappedTensorAllocator() {}
Status InitializeFromRegion(const string& name, Env* env) {
const auto status =
env->NewReadOnlyMemoryRegionFromFile(name, &memory_region_);
if (!status.ok()) {
return status;
}
return absl::OkStatus();
}
string Name() override { return "MemmappedTensorAllocator"; }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
if ((reinterpret_cast<intptr_t>(memory_region_->data())) % alignment != 0) {
allocation_status_ =
errors::Internal("Readonly memory region has wrong alignment");
return nullptr;
}
if (num_bytes > memory_region_->length()) {
allocation_status_ = errors::Internal(
"Readonly memory region has wrong length (", memory_region_->length(),
") when allocating ", num_bytes);
return nullptr;
}
return const_cast<void*>(memory_region_->data());
}
void DeallocateRaw(void* ptr) override {
if (ptr != memory_region_->data()) {
LOG(ERROR)
<< "Deallocating not allocated region for readonly memory region";
}
if (delete_on_deallocate_) {
delete this;
}
}
const Status& allocation_status() const { return allocation_status_; }
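  // Once set, the allocator deletes itself when the buffer it backs is
  // deallocated (see DeallocateRaw above).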
void set_delete_on_deallocate() { delete_on_deallocate_ = true; }
bool AllocatesOpaqueHandle() const override { return true; }
private:
std::unique_ptr<ReadOnlyMemoryRegion> memory_region_;
Status allocation_status_;
bool delete_on_deallocate_ = false;
MemmappedTensorAllocator(const MemmappedTensorAllocator&) = delete;
void operator=(const MemmappedTensorAllocator&) = delete;
};
}
ImmutableConstantOp::ImmutableConstantOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context,
context->GetAttr(kMemoryRegionNameAttr, ®ion_name_));
OP_REQUIRES_OK(context, context->GetAttr(kDTypeAttr, &dtype_));
OP_REQUIRES(context, dtype_ != DT_RESOURCE && dtype_ != DT_VARIANT,
errors::InvalidArgument(
"Resource and variant dtypes are invalid for this op."));
OP_REQUIRES_OK(context, context->GetAttr(kShapeAttr, &shape_));
}
void ImmutableConstantOp::Compute(OpKernelContext* ctx) {
std::unique_ptr<MemmappedTensorAllocator> allocator(
new MemmappedTensorAllocator());
OP_REQUIRES_OK(ctx,
allocator->InitializeFromRegion(region_name_, ctx->env()));
OP_REQUIRES(ctx, dtype_ != DT_STRING,
errors::Unimplemented("Sorry, DT_STRING is not currently "
"supported for ImmutableConstOp."));
ctx->set_output(0, Tensor(allocator.get(), dtype_, shape_));
OP_REQUIRES_OK(ctx, allocator->allocation_status());
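  // Ownership of the allocator passes to the output tensor's buffer; the
  // allocator self-deletes when that buffer is released.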
allocator.release()->set_delete_on_deallocate();
}
ImmutableConstantOp::~ImmutableConstantOp() {}
constexpr char const* ImmutableConstantOp::kDTypeAttr;
constexpr char const* ImmutableConstantOp::kShapeAttr;
constexpr char const* ImmutableConstantOp::kMemoryRegionNameAttr;
REGISTER_KERNEL_BUILDER(Name("ImmutableConst").Device(DEVICE_CPU),
ImmutableConstantOp);
} | #include "tensorflow/core/kernels/immutable_constant_op.h"
#include <algorithm>
#include <tuple>
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/null_file_system.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace {
constexpr size_t kTestAlignment = 4096;
constexpr size_t kTestTensorSize = 4;
constexpr size_t kTestTensorSizeBytes = kTestTensorSize * sizeof(float);
class TestReadOnlyMemoryRegion : public ReadOnlyMemoryRegion {
public:
TestReadOnlyMemoryRegion() = delete;
explicit TestReadOnlyMemoryRegion(uint64 length)
: memptr_(cpu_allocator()->AllocateRaw(kTestAlignment, length)),
length_(length) {}
~TestReadOnlyMemoryRegion() override {
cpu_allocator()->DeallocateRaw(memptr_);
}
const void* data() override { return memptr_; }
float* GetWritableDataStart() { return reinterpret_cast<float*>(memptr_); }
uint64 length() override { return length_; }
protected:
void* memptr_;
uint64 length_;
};
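// Fake filesystem: memory regions for URIs whose path is "/2" are filled with
// 2.0f, "/3" with 3.0f, and anything else with 0.0f.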
class TestFileSystem : public NullFileSystem {
public:
~TestFileSystem() override = default;
using NullFileSystem::NewReadOnlyMemoryRegionFromFile;
Status NewReadOnlyMemoryRegionFromFile(
const string& fname, TransactionToken* token,
std::unique_ptr<ReadOnlyMemoryRegion>* result) override {
float val = 0;
StringPiece scheme, host, path;
io::ParseURI(fname, &scheme, &host, &path);
if (path == "/2") {
val = 2.0f;
} else if (path == "/3") {
val = 3.0f;
} else {
val = 0.0f;
}
auto region = new TestReadOnlyMemoryRegion(kTestTensorSizeBytes);
std::fill_n(region->GetWritableDataStart(), kTestTensorSize, val);
result->reset(region);
return absl::OkStatus();
}
};
REGISTER_FILE_SYSTEM("test", TestFileSystem);
struct ImmutableConstantOpTest {};
TEST(ImmutableConstantOpTest, Simple) {
const TensorShape kTestTensorShape({4, 1});
const TensorShape kTestTensorShapeT({1, 4});
auto root = Scope::NewRootScope().ExitOnError();
auto node1 =
      ops::ImmutableConst(root, DT_FLOAT, kTestTensorShape, "test:///2");
auto node2 =
      ops::ImmutableConst(root, DT_FLOAT, kTestTensorShapeT, "test:///3");
auto result = ops::MatMul(root, node1, node2);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
SessionOptions session_options;
session_options.env = Env::Default();
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
std::unique_ptr<Session> session(NewSession(session_options));
ASSERT_TRUE(session != nullptr) << "Failed to create session";
TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {result.node()->name() + ":0"}, {}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs.front().flat<float>()(0), 2.0f * 3.0f);
EXPECT_EQ(outputs.front().flat<float>()(1), 2.0f * 3.0f);
EXPECT_EQ(outputs.front().flat<float>()(2), 2.0f * 3.0f);
EXPECT_EQ(outputs.front().flat<float>()(kTestTensorSize - 1), 2.0f * 3.0f);
}
TEST(ImmutableConstantOpTest, ExecutionError) {
const TensorShape kBadTensorShape({40, 100});
const TensorShape kTestTensorShapeT({1, 4});
auto root = Scope::DisabledShapeInferenceScope().ExitOnError();
auto node1 =
      ops::ImmutableConst(root, DT_FLOAT, kBadTensorShape, "test:///2");
auto node2 =
      ops::ImmutableConst(root, DT_FLOAT, kTestTensorShapeT, "test:///3");
auto result = ops::MatMul(root, node1, node2);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
SessionOptions session_options;
session_options.env = Env::Default();
std::unique_ptr<Session> session(NewSession(session_options));
ASSERT_TRUE(session != nullptr) << "Failed to create session";
TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
std::vector<Tensor> outputs;
EXPECT_EQ(
session->Run({}, {result.node()->name() + ":0"}, {}, &outputs).code(),
error::INTERNAL);
}
Status CreateTempFileFloat(Env* env, float value, uint64 size,
string* filename) {
const string dir = testing::TmpDir();
*filename = io::JoinPath(dir, strings::StrCat("file_", value));
std::unique_ptr<WritableFile> file;
TF_RETURN_IF_ERROR(env->NewWritableFile(*filename, &file));
for (uint64 i = 0; i < size; ++i) {
StringPiece sp(static_cast<char*>(static_cast<void*>(&value)),
sizeof(value));
TF_RETURN_IF_ERROR(file->Append(sp));
}
TF_RETURN_IF_ERROR(file->Close());
return absl::OkStatus();
}
TEST(ImmutableConstantOpTest, FromFile) {
const TensorShape kFileTensorShape({1000, 1});
Env* env = Env::Default();
auto root = Scope::NewRootScope().ExitOnError();
string two_file, three_file;
TF_ASSERT_OK(CreateTempFileFloat(env, 2.0f, 1000, &two_file));
TF_ASSERT_OK(CreateTempFileFloat(env, 3.0f, 1000, &three_file));
auto node1 = ops::ImmutableConst(root, DT_FLOAT, kFileTensorShape, two_file);
auto node2 =
ops::ImmutableConst(root, DT_FLOAT, kFileTensorShape, three_file);
auto result = ops::MatMul(root, node1, node2, ops::MatMul::TransposeB(true));
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
std::unique_ptr<Session> session(NewSession(session_options));
ASSERT_TRUE(session != nullptr) << "Failed to create session";
TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {result.node()->name() + ":0"}, {}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs.front().flat<float>()(0), 2.0f * 3.0f);
EXPECT_EQ(outputs.front().flat<float>()(1), 2.0f * 3.0f);
EXPECT_EQ(outputs.front().flat<float>()(2), 2.0f * 3.0f);
}
Status CreateTempFileBadString(Env* env, char value, uint64 size,
const string suffix, string* filename) {
const string dir = testing::TmpDir();
*filename = io::JoinPath(dir, strings::StrCat("file_", suffix));
std::unique_ptr<WritableFile> file;
TF_RETURN_IF_ERROR(env->NewWritableFile(*filename, &file));
TF_RETURN_IF_ERROR(file->Append(std::string(size, value)));
TF_RETURN_IF_ERROR(file->Close());
return absl::OkStatus();
}
TEST(ImmutableConstantOpTest, FromFileStringUnimplmented) {
const TensorShape kFileTensorShape({1});
Env* env = Env::Default();
auto root = Scope::NewRootScope().ExitOnError();
string bad_file;
TF_ASSERT_OK(CreateTempFileBadString(env, '\xe2', 128, "bad_e2", &bad_file));
auto result =
ops::ImmutableConst(root, DT_STRING, kFileTensorShape, bad_file);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
SessionOptions session_options;
session_options.env = Env::Default();
std::unique_ptr<Session> session(NewSession(session_options));
ASSERT_TRUE(session != nullptr) << "Failed to create session";
TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
std::vector<Tensor> outputs;
EXPECT_EQ(
session->Run({}, {result.node()->name() + ":0"}, {}, &outputs).code(),
error::UNIMPLEMENTED);
}
}
} |
1,495 | cpp | tensorflow/tensorflow | sparse_tensor_dense_matmul_op | tensorflow/core/kernels/sparse_tensor_dense_matmul_op.cc | tensorflow/core/kernels/sparse_tensor_dense_matmul_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_SPARSE_TENSOR_DENSE_MATMUL_OP_H_
#define TENSORFLOW_CORE_KERNELS_SPARSE_TENSOR_DENSE_MATMUL_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T, typename Tindices, bool ADJ_A,
bool ADJ_B>
struct SparseTensorDenseMatMulFunctor {
static EIGEN_ALWAYS_INLINE Status Compute(
OpKernelContext* ctx, typename TTypes<T>::Matrix out,
typename TTypes<Tindices>::ConstMatrix a_indices,
typename TTypes<T>::ConstVec a_values, typename TTypes<T>::ConstMatrix b);
};
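// MaybeAdjoint<MATRIX, ADJ> reads m(i, j) directly when ADJ is false and
// conj(m(j, i)) when ADJ is true, letting the matmul kernels share one loop.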
template <typename MATRIX, bool ADJ>
class MaybeAdjoint;
template <typename MATRIX>
class MaybeAdjoint<MATRIX, false> {
public:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE MaybeAdjoint(MATRIX m) : m_(m) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename MATRIX::Scalar operator()(
const typename MATRIX::Index i, const typename MATRIX::Index j) const {
return m_(i, j);
}
private:
const MATRIX m_;
};
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T MaybeConj(T v) {
return Eigen::numext::conj(v);
}
template <typename MATRIX>
class MaybeAdjoint<MATRIX, true> {
public:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE MaybeAdjoint(MATRIX m) : m_(m) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename MATRIX::Scalar operator()(
const typename MATRIX::Index i, const typename MATRIX::Index j) const {
return Eigen::numext::conj(m_(j, i));
}
private:
const MATRIX m_;
};
template <typename T>
struct SumType {
using type = T;
};
template <>
struct SumType<Eigen::half> {
using type = float;
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/sparse_tensor_dense_matmul_op.h"
#include "Eigen/Core"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/platform/bfloat16.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T, typename Tindices>
class SparseTensorDenseMatMulOp : public OpKernel {
public:
explicit SparseTensorDenseMatMulOp(OpKernelConstruction* ctx)
: OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("adjoint_a", &adjoint_a_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("adjoint_b", &adjoint_b_));
}
void Compute(OpKernelContext* ctx) override {
const Tensor* a_indices;
const Tensor* a_values;
const Tensor* a_shape;
const Tensor* b;
OP_REQUIRES_OK(ctx, ctx->input("a_indices", &a_indices));
OP_REQUIRES_OK(ctx, ctx->input("a_values", &a_values));
OP_REQUIRES_OK(ctx, ctx->input("a_shape", &a_shape));
OP_REQUIRES_OK(ctx, ctx->input("b", &b));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(b->shape()),
errors::InvalidArgument("Tensor 'b' is not a matrix"));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_shape->shape()),
errors::InvalidArgument("Tensor 'a_shape' is not a vector"));
OP_REQUIRES(
ctx, a_shape->NumElements() == 2,
errors::InvalidArgument("Tensor 'a_shape' must have 2 elements"));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_values->shape()),
errors::InvalidArgument("Tensor 'a_values' is not a vector"));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a_indices->shape()),
errors::InvalidArgument("Tensor 'a_indices' is not a matrix"));
const int64_t nnz = a_indices->shape().dim_size(0);
OP_REQUIRES(ctx, nnz == a_values->NumElements(),
errors::InvalidArgument("Number of rows of a_indices does not "
"match number of entries in a_values"));
OP_REQUIRES(
ctx, a_indices->shape().dim_size(1) == a_shape->NumElements(),
errors::InvalidArgument("Number of columns of a_indices does not match "
"number of entries in a_shape"));
auto a_shape_t = a_shape->vec<int64_t>();
const int64_t outer_left = (adjoint_a_) ? a_shape_t(1) : a_shape_t(0);
const int64_t outer_right =
(adjoint_b_) ? b->shape().dim_size(0) : b->shape().dim_size(1);
const int64_t inner_left = (adjoint_a_) ? a_shape_t(0) : a_shape_t(1);
const int64_t inner_right =
(adjoint_b_) ? b->shape().dim_size(1) : b->shape().dim_size(0);
OP_REQUIRES(
ctx, inner_right == inner_left,
errors::InvalidArgument(
"Cannot multiply A and B because inner dimension does not match: ",
inner_left, " vs. ", inner_right,
". Did you forget a transpose? "
"Dimensions of A: [",
a_shape_t(0), ", ", a_shape_t(1),
"). Dimensions of B: ", b->shape().DebugString()));
if (std::is_same<Device, GPUDevice>::value) {
const int int32max = std::numeric_limits<int>::max();
OP_REQUIRES(
ctx,
(FastBoundsCheck(inner_left, int32max) &&
FastBoundsCheck(inner_right, int32max) &&
FastBoundsCheck(outer_left, int32max) &&
FastBoundsCheck(outer_right, int32max) &&
FastBoundsCheck(b->NumElements(), int32max) &&
FastBoundsCheck(outer_left * outer_right, int32max) &&
FastBoundsCheck(a_values->NumElements(), int32max)),
errors::InvalidArgument("Cannot use GPU for > 2^31 entry inputs"));
OP_REQUIRES(ctx, FastBoundsCheck(nnz * outer_right, int32max),
errors::InvalidArgument(
"Cannot use GPU when output.shape[1] * nnz(a) > 2^31"));
}
TensorShape out_shape({outer_left, outer_right});
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, out_shape, &out));
if (out->NumElements() == 0) {
return;
}
if (a_values->NumElements() == 0 || b->NumElements() == 0) {
functor::SetZeroFunctor<Device, T> f;
f(ctx->eigen_device<Device>(), out->flat<T>());
return;
}
#define MAYBE_ADJOINT(ADJ_A, ADJ_B) \
if (adjoint_a_ == ADJ_A && adjoint_b_ == ADJ_B) { \
Status functor_status = functor::SparseTensorDenseMatMulFunctor< \
Device, T, Tindices, ADJ_A, \
ADJ_B>::Compute(ctx, out->matrix<T>(), a_indices->matrix<Tindices>(), \
a_values->vec<T>(), b->matrix<T>()); \
OP_REQUIRES_OK(ctx, functor_status); \
}
MAYBE_ADJOINT(false, false);
MAYBE_ADJOINT(false, true);
MAYBE_ADJOINT(true, false);
MAYBE_ADJOINT(true, true);
#undef MAYBE_ADJOINT
}
private:
bool adjoint_a_;
bool adjoint_b_;
};
#define REGISTER_CPU(TypeT, TypeIndex) \
REGISTER_KERNEL_BUILDER( \
Name("SparseTensorDenseMatMul") \
.Device(DEVICE_CPU) \
.TypeConstraint<TypeT>("T") \
.TypeConstraint<TypeIndex>("Tindices") \
.HostMemory("a_shape"), \
SparseTensorDenseMatMulOp<CPUDevice, TypeT, TypeIndex>);
#define REGISTER_KERNELS_CPU(T) \
REGISTER_CPU(T, int64_t); \
REGISTER_CPU(T, int32)
REGISTER_KERNELS_CPU(Eigen::half);
REGISTER_KERNELS_CPU(float);
REGISTER_KERNELS_CPU(double);
REGISTER_KERNELS_CPU(int32);
REGISTER_KERNELS_CPU(complex64);
REGISTER_KERNELS_CPU(complex128);
REGISTER_KERNELS_CPU(bfloat16);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
#define DECLARE_GPU_SPEC(T, Tindices, ADJ_A, ADJ_B) \
template <> \
Status SparseTensorDenseMatMulFunctor< \
GPUDevice, T, Tindices, ADJ_A, \
ADJ_B>::Compute(OpKernelContext* ctx, typename TTypes<T>::Matrix out, \
TTypes<Tindices>::ConstMatrix a_indices, \
typename TTypes<T>::ConstVec a_values, \
typename TTypes<T>::ConstMatrix b); \
extern template struct SparseTensorDenseMatMulFunctor< \
GPUDevice, T, Tindices, ADJ_A, ADJ_B>;
#define REGISTER_GPU_SPEC(T, ADJ_A, ADJ_B) \
DECLARE_GPU_SPEC(T, int32, ADJ_A, ADJ_B); \
DECLARE_GPU_SPEC(T, int64_t, ADJ_A, ADJ_B)
#define DECLARE_ADJOINT_GPU_SPEC(T) \
REGISTER_GPU_SPEC(T, false, false) \
REGISTER_GPU_SPEC(T, false, true) \
REGISTER_GPU_SPEC(T, true, false) \
REGISTER_GPU_SPEC(T, true, true)
DECLARE_ADJOINT_GPU_SPEC(Eigen::half);
DECLARE_ADJOINT_GPU_SPEC(float);
DECLARE_ADJOINT_GPU_SPEC(double);
DECLARE_ADJOINT_GPU_SPEC(complex64);
DECLARE_ADJOINT_GPU_SPEC(complex128);
#undef DECLARE_ADJOINT_GPU_SPEC
#undef DECLARE_GPU_SPEC
#undef REGISTER_GPU_SPEC
}
#define REGISTER_GPU(TypeT, TypeIndex) \
REGISTER_KERNEL_BUILDER( \
Name("SparseTensorDenseMatMul") \
.Device(DEVICE_GPU) \
.TypeConstraint<TypeT>("T") \
.TypeConstraint<TypeIndex>("Tindices") \
.HostMemory("a_shape"), \
SparseTensorDenseMatMulOp<GPUDevice, TypeT, TypeIndex>);
#define REGISTER_KERNELS_GPU(T) \
REGISTER_GPU(T, int64_t); \
REGISTER_GPU(T, int32)
REGISTER_KERNELS_GPU(Eigen::half);
REGISTER_KERNELS_GPU(float);
REGISTER_KERNELS_GPU(double);
REGISTER_KERNELS_GPU(complex64);
REGISTER_KERNELS_GPU(complex128);
#undef REGISTER_GPU
#undef REGISTER_KERNELS_GPU
#endif
namespace functor {
namespace {
Status KOutOfBoundsError(int64_t k, std::size_t i, int rhs_index_a,
std::size_t lhs_right) {
return errors::InvalidArgument("k (", k, ") from index[", i, ",", rhs_index_a,
"] out of bounds (>=", lhs_right, ")");
}
Status MOutOfBoundsError(int64_t m, std::size_t i, int lhs_index_a,
int64_t out_dim0) {
return errors::InvalidArgument("m (", m, ") from index[", i, ",", lhs_index_a,
"] out of bounds (>=", out_dim0, ")");
}
template <typename T, typename Tsum, typename Tindices, bool ADJ_A, bool ADJ_B>
Status SparseTensorDenseMatMulImpl(
typename TTypes<Tsum>::Matrix out,
typename TTypes<Tindices>::ConstMatrix a_indices,
typename TTypes<T>::ConstVec a_values, typename TTypes<T>::ConstMatrix b) {
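  // Below this output width a plain scalar loop over the nonzeros is used;
  // wider outputs update a whole output row per nonzero via Eigen chips.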
static constexpr std::size_t kNumVectorize = 32;
const std::size_t nnz = a_values.size();
const std::size_t rhs_right = (ADJ_B ? b.dimension(0) : b.dimension(1));
const std::size_t lhs_right = (ADJ_B ? b.dimension(1) : b.dimension(0));
const int lhs_index_a = ADJ_A ? 1 : 0;
const int rhs_index_a = ADJ_A ? 0 : 1;
if (rhs_right < kNumVectorize) {
auto maybe_adjoint_b = MaybeAdjoint<decltype(b), ADJ_B>(b);
for (std::size_t i = 0; i < nnz; ++i) {
const Tindices m = internal::SubtleMustCopy(a_indices(i, lhs_index_a));
const Tindices k = internal::SubtleMustCopy(a_indices(i, rhs_index_a));
if (!FastBoundsCheck(k, lhs_right)) {
return KOutOfBoundsError(k, i, rhs_index_a, lhs_right);
}
if (!FastBoundsCheck(m, out.dimension(0))) {
return MOutOfBoundsError(m, i, lhs_index_a, out.dimension(0));
}
const T a_value = ADJ_A ? MaybeConj(a_values(i)) : a_values(i);
for (std::size_t n = 0; n < rhs_right; ++n) {
const T b_value = maybe_adjoint_b(k, n);
out(m, n) += static_cast<Tsum>(a_value) * static_cast<Tsum>(b_value);
}
}
} else {
const int b_chip_index = ADJ_B ? 1 : 0;
#define LOOP_NNZ(b_passed) \
for (std::size_t i = 0; i < nnz; ++i) { \
const Tindices m = internal::SubtleMustCopy(a_indices(i, lhs_index_a)); \
const Tindices k = internal::SubtleMustCopy(a_indices(i, rhs_index_a)); \
const T a_value = (ADJ_A) ? MaybeConj(a_values(i)) : a_values(i); \
if (!FastBoundsCheck(k, lhs_right)) { \
return KOutOfBoundsError(k, i, rhs_index_a, lhs_right); \
} \
if (!FastBoundsCheck(m, out.dimension(0))) { \
return MOutOfBoundsError(m, i, lhs_index_a, out.dimension(0)); \
} \
out.template chip<0>(m) += \
b_passed.template chip<b_chip_index>(k).template cast<Tsum>() * \
static_cast<Tsum>(a_value); \
}
if (ADJ_B) {
Eigen::array<int, 2> shuffle{1, 0};
Eigen::Tensor<T, 2, Eigen::ColMajor> col_major_conj_b =
b.swap_layout().shuffle(shuffle).conjugate();
LOOP_NNZ(col_major_conj_b);
} else {
LOOP_NNZ(b);
}
#undef LOOP_NNZ
}
return absl::OkStatus();
}
}
template <typename T, typename Tindices, bool ADJ_A, bool ADJ_B>
struct SparseTensorDenseMatMulFunctor<CPUDevice, T, Tindices, ADJ_A, ADJ_B> {
static Status Compute(OpKernelContext* ctx, typename TTypes<T>::Matrix out,
typename TTypes<Tindices>::ConstMatrix a_indices,
typename TTypes<T>::ConstVec a_values,
typename TTypes<T>::ConstMatrix b) {
using Tsum = typename SumType<T>::type;
Tensor temp_out_t;
if (!std::is_same<T, Tsum>::value) {
TF_RETURN_IF_ERROR(ctx->allocate_temp(
DataTypeToEnum<Tsum>::value,
TensorShape({out.dimension(0), out.dimension(1)}), &temp_out_t));
auto temp_out = temp_out_t.matrix<Tsum>();
temp_out.setZero();
TF_RETURN_IF_ERROR(
SparseTensorDenseMatMulImpl<T, Tsum, Tindices, ADJ_A, ADJ_B>(
temp_out, a_indices, a_values, b));
out = temp_out.template cast<T>();
} else {
out.setZero();
auto out_workaround =
*reinterpret_cast<typename TTypes<Tsum>::Matrix*>(&out);
TF_RETURN_IF_ERROR(
SparseTensorDenseMatMulImpl<T, Tsum, Tindices, ADJ_A, ADJ_B>(
out_workaround, a_indices, a_values, b));
}
return OkStatus();
}
};
}
} | #include <random>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
Node* SparseTensorDenseMatMulNode(Graph* g, Node* a_indices, Node* a_values,
Node* a_shape, Node* b, bool adjoint_a,
bool adjoint_b) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "SparseTensorDenseMatMul")
.Input(a_indices)
.Input(a_values)
.Input(a_shape)
.Input(b)
.Attr("T", DT_FLOAT)
.Attr("adjoint_a", adjoint_a)
.Attr("adjoint_b", adjoint_b)
.Finalize(g, &ret));
return ret;
}
static Graph* SparseTensorDenseMatmul(int nnz, int m, int k, int n,
bool adjoint_a, bool adjoint_b) {
Graph* g = new Graph(OpRegistry::Global());
Tensor a_values(DT_FLOAT, TensorShape({nnz}));
Tensor a_indices(DT_INT64, TensorShape({nnz, 2}));
Tensor a_shape(DT_INT64, TensorShape({2}));
auto a_shape_t = a_shape.vec<int64_t>();
a_shape_t(0) = adjoint_a ? k : m;
a_shape_t(1) = adjoint_a ? m : k;
a_values.flat<float>().setRandom();
auto a_indices_t = a_indices.matrix<int64_t>();
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> a_lhs_dist(0, a_shape_t(0) - 1);
std::uniform_int_distribution<> a_rhs_dist(0, a_shape_t(1) - 1);
for (int32_t i = 0; i < nnz; ++i) {
a_indices_t(i, 0) = a_lhs_dist(gen);
a_indices_t(i, 1) = a_rhs_dist(gen);
}
Tensor b(DT_FLOAT, adjoint_b ? TensorShape({n, k}) : TensorShape({k, n}));
b.flat<float>().setRandom();
SparseTensorDenseMatMulNode(
g, test::graph::Constant(g, a_indices),
test::graph::Constant(g, a_values), test::graph::HostConstant(g, a_shape),
test::graph::Constant(g, b), adjoint_a, adjoint_b);
return g;
}
#define BM_SparseTensorDenseMatmulDev(NNZ, M, K, N, TA, TB, DEVICE) \
static void \
BM_SparseTensorDenseMatmul##_##NNZ##_##M##_##K##_##N##_##TA##_##TB##_##DEVICE( \
::testing::benchmark::State& state) { \
int64_t items_per_iter = (static_cast<int64_t>(NNZ) * (TB ? K : N)); \
test::Benchmark(#DEVICE, SparseTensorDenseMatmul(NNZ, M, K, N, TA, TB), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * items_per_iter); \
state.SetBytesProcessed(state.iterations() * items_per_iter * \
sizeof(float)); \
} \
BENCHMARK( \
BM_SparseTensorDenseMatmul##_##NNZ##_##M##_##K##_##N##_##TA##_##TB##_##DEVICE);
#define BM_SparseTensorDenseMatmul(NNZ, M, K, N, TA, TB) \
BM_SparseTensorDenseMatmulDev(NNZ, M, K, N, TA, TB, cpu); \
BM_SparseTensorDenseMatmulDev(NNZ, M, K, N, TA, TB, gpu);
BM_SparseTensorDenseMatmul(128, 8, 512, 1, false, false);
BM_SparseTensorDenseMatmul(128, 16, 512, 1, false, false);
BM_SparseTensorDenseMatmul(128, 128, 512, 1, false, false);
BM_SparseTensorDenseMatmul(128, 4096, 4096, 1, false, false);
BM_SparseTensorDenseMatmul(1024, 4096, 4096, 1, false, false);
BM_SparseTensorDenseMatmul(16384, 4096, 4096, 1, false, false);
BM_SparseTensorDenseMatmul(128, 8, 1024, 16, false, false);
BM_SparseTensorDenseMatmul(128, 16, 1024, 16, false, false);
BM_SparseTensorDenseMatmul(128, 128, 1024, 16, false, false);
BM_SparseTensorDenseMatmul(128, 4096, 4096, 128, false, false);
BM_SparseTensorDenseMatmul(128, 4096, 4096, 1024, false, false);
BM_SparseTensorDenseMatmul(1024, 8, 1024, 16, false, false);
BM_SparseTensorDenseMatmul(1024, 16, 1024, 16, false, false);
BM_SparseTensorDenseMatmul(1024, 128, 1024, 16, false, false);
BM_SparseTensorDenseMatmul(1024, 4096, 4096, 128, false, false);
BM_SparseTensorDenseMatmul(1024, 4096, 4096, 1024, false, false);
BM_SparseTensorDenseMatmul(16384, 8, 1024, 16, false, false);
BM_SparseTensorDenseMatmul(16384, 16, 1024, 16, false, false);
BM_SparseTensorDenseMatmul(16384, 128, 1024, 16, false, false);
BM_SparseTensorDenseMatmul(16384, 4096, 4096, 128, false, false);
BM_SparseTensorDenseMatmul(16384, 4096, 4096, 1024, false, false);
BM_SparseTensorDenseMatmul(16384, 4096, 4096, 4096, false, false);
BM_SparseTensorDenseMatmul(16384, 4096, 4096, 4096, false, true);
BM_SparseTensorDenseMatmul(16384, 4096, 4096, 4096, true, false);
BM_SparseTensorDenseMatmul(16384, 4096, 4096, 4096, true, true);
} |
1,496 | cpp | tensorflow/tensorflow | constant_op | tensorflow/core/kernels/constant_op.cc | tensorflow/core/kernels/constant_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_CONSTANT_OP_H_
#define TENSORFLOW_CORE_KERNELS_CONSTANT_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
class ConstantOp : public OpKernel {
public:
explicit ConstantOp(OpKernelConstruction* ctx);
void Compute(OpKernelContext* ctx) override;
bool IsExpensive() override { return false; }
  const Tensor* const_tensor() const override { return &tensor_; }
~ConstantOp() override;
private:
Tensor tensor_;
ConstantOp(const ConstantOp&) = delete;
void operator=(const ConstantOp&) = delete;
};
class PlaceholderOp : public OpKernel {
public:
explicit PlaceholderOp(OpKernelConstruction* ctx);
void Compute(OpKernelContext* ctx) override;
private:
PartialTensorShape expected_shape_;
};
}
#endif
#define EIGEN_USE_THREADS
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#define EIGEN_USE_GPU
#endif
#include "tensorflow/core/kernels/constant_op.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/scoped_memory_debug_annotation.h"
namespace tensorflow {
namespace {
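// Rebuilds the NodeDef with only name/op/device plus the dtype attr so the
// potentially large "value" tensor proto is not duplicated inside the kernel's
// stored NodeDef.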
NodeDef StripTensorDataFromNodeDef(OpKernelConstruction* ctx) {
const NodeDef& original = ctx->def();
if (std::is_base_of<protobuf::Message, NodeDef>()) {
DCHECK_EQ(reinterpret_cast<const protobuf::Message*>(&original)
->GetDescriptor()
->field_count(),
7)
<< "The NodeDef format has changed, and the attr-stripping code may "
"need to be updated.";
}
NodeDef ret;
ret.set_name(original.name());
ret.set_op(original.op());
ret.set_device(original.device());
AddNodeAttr("dtype", ctx->output_type(0), &ret);
MergeDebugInfo(original, &ret);
if (original.has_experimental_type()) {
*ret.mutable_experimental_type() = original.experimental_type();
}
return ret;
}
}
ConstantOp::ConstantOp(OpKernelConstruction* ctx)
: OpKernel(ctx, StripTensorDataFromNodeDef(ctx), false),
tensor_(ctx->output_type(0)) {
const TensorProto* proto = nullptr;
tsl::profiler::ScopedMemoryDebugAnnotation op_annotation(name_view().data());
OP_REQUIRES_OK(ctx, ctx->GetAttr("value", &proto));
OP_REQUIRES_OK(ctx, ctx->device()->MakeTensorFromProto(
*proto, AllocatorAttributes(), &tensor_));
OP_REQUIRES(
ctx, ctx->output_type(0) == tensor_.dtype(),
errors::InvalidArgument("Type mismatch between value (",
DataTypeString(tensor_.dtype()), ") and dtype (",
DataTypeString(ctx->output_type(0)), ")"));
}
void ConstantOp::Compute(OpKernelContext* ctx) {
ctx->set_output(0, tensor_);
if (TF_PREDICT_FALSE(ctx->track_allocations())) {
ctx->record_persistent_memory_allocation(tensor_.AllocatedBytes());
}
}
ConstantOp::~ConstantOp() {}
REGISTER_KERNEL_BUILDER(Name("Const").Device(DEVICE_CPU), ConstantOp);
REGISTER_KERNEL_BUILDER(Name("Const").Device(DEVICE_TPU_SYSTEM), ConstantOp);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#define REGISTER_KERNEL(D, TYPE) \
REGISTER_KERNEL_BUILDER( \
Name("Const").Device(DEVICE_##D).TypeConstraint<TYPE>("dtype"), \
ConstantOp);
REGISTER_KERNEL(GPU, Eigen::half);
REGISTER_KERNEL(GPU, bfloat16);
REGISTER_KERNEL(GPU, float);
REGISTER_KERNEL(GPU, double);
REGISTER_KERNEL(GPU, uint8);
REGISTER_KERNEL(GPU, int8);
REGISTER_KERNEL(GPU, qint8);
REGISTER_KERNEL(GPU, uint16);
REGISTER_KERNEL(GPU, int16);
REGISTER_KERNEL(GPU, qint16);
REGISTER_KERNEL(GPU, quint16);
REGISTER_KERNEL(GPU, uint32);
REGISTER_KERNEL(GPU, qint32);
REGISTER_KERNEL(GPU, int64_t);
REGISTER_KERNEL(GPU, uint64);
REGISTER_KERNEL(GPU, complex64);
REGISTER_KERNEL(GPU, complex128);
REGISTER_KERNEL(GPU, bool);
REGISTER_KERNEL(GPU, Variant);
#undef REGISTER_KERNEL
#endif
#define REGISTER_DEFAULT_KERNEL(TYPE) \
REGISTER_KERNEL_BUILDER( \
Name("Const").Device(DEVICE_DEFAULT).TypeConstraint<TYPE>("dtype"), \
ConstantOp);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL);
TF_CALL_QUANTIZED_TYPES(REGISTER_DEFAULT_KERNEL);
TF_CALL_qint16(REGISTER_DEFAULT_KERNEL);
TF_CALL_quint16(REGISTER_DEFAULT_KERNEL);
TF_CALL_bool(REGISTER_DEFAULT_KERNEL);
TF_CALL_variant(REGISTER_DEFAULT_KERNEL);
#undef REGISTER_DEFAULT_KERNEL
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T, typename Index>
class FillOp : public OpKernel {
public:
explicit FillOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& Tdims = context->input(0);
OP_REQUIRES(
context,
(TensorShapeUtils::IsVector(Tdims.shape()) ||
TensorShapeUtils::IsScalar(Tdims.shape())),
errors::InvalidArgument("dims must represent a vector, got shape ",
Tdims.shape().DebugString()));
const Tensor& Tvalue = context->input(1);
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(Tvalue.shape()) ||
(TensorShapeUtils::IsVector(Tvalue.shape()) &&
Tvalue.shape().dim_size(0) == 1),
errors::InvalidArgument("value must represent a scalar, got shape ",
Tvalue.shape().DebugString()));
auto dims = Tdims.flat<Index>();
TensorShape shape;
OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(
reinterpret_cast<const Index*>(dims.data()),
dims.size(), &shape));
Tensor* out = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, shape, &out));
functor::FillFunctor<Device, T> functor;
functor(context->eigen_device<Device>(), out->flat<T>(),
Tvalue.scalar<T>());
}
};
#define REGISTER_KERNEL(D, TYPE) \
REGISTER_KERNEL_BUILDER(Name("Fill") \
.Device(DEVICE_##D) \
.TypeConstraint<TYPE>("T") \
.TypeConstraint<int32>("index_type") \
.HostMemory("dims"), \
FillOp<D##Device, TYPE, int32>); \
REGISTER_KERNEL_BUILDER(Name("Fill") \
.Device(DEVICE_##D) \
.TypeConstraint<TYPE>("T") \
.TypeConstraint<int64_t>("index_type") \
.HostMemory("dims"), \
FillOp<D##Device, TYPE, int64>);
#define REGISTER_CPU_KERNEL(TYPE) REGISTER_KERNEL(CPU, TYPE)
TF_CALL_ALL_TYPES(REGISTER_CPU_KERNEL);
REGISTER_KERNEL(CPU, quint8);
REGISTER_KERNEL(CPU, quint16);
REGISTER_KERNEL(CPU, qint8);
REGISTER_KERNEL(CPU, qint16);
REGISTER_KERNEL(CPU, qint32);
REGISTER_KERNEL(CPU, int4);
REGISTER_KERNEL(CPU, uint4);
#undef REGISTER_CPU_KERNEL
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
REGISTER_KERNEL(GPU, Eigen::half);
REGISTER_KERNEL(GPU, bfloat16);
REGISTER_KERNEL(GPU, float);
REGISTER_KERNEL(GPU, double);
REGISTER_KERNEL(GPU, complex64);
REGISTER_KERNEL(GPU, complex128);
REGISTER_KERNEL(GPU, uint8);
REGISTER_KERNEL(GPU, int8);
REGISTER_KERNEL(GPU, uint16);
REGISTER_KERNEL(GPU, int16);
REGISTER_KERNEL(GPU, int64_t);
REGISTER_KERNEL(GPU, bool);
REGISTER_KERNEL(GPU, int4);
REGISTER_KERNEL(GPU, uint4);
#endif
REGISTER_KERNEL_BUILDER(Name("Fill")
.Device(DEVICE_DEFAULT)
.TypeConstraint<int32>("T")
.TypeConstraint<int32>("index_type")
.HostMemory("dims")
.HostMemory("value")
.HostMemory("output"),
FillOp<CPUDevice, int32, int32>);
#undef REGISTER_KERNEL
template <typename Device, typename T>
class ZerosLikeOp : public OpKernel {
public:
explicit ZerosLikeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(0);
const Device& d = ctx->eigen_device<Device>();
if (std::is_same<T, Variant>::value) {
OP_REQUIRES(
ctx, input.dims() == 0,
errors::InvalidArgument("ZerosLike non-scalar Tensor with "
"dtype=DT_VARIANT is not supported."));
const Variant& v = input.scalar<Variant>()();
int numa_node = ctx->device()->NumaNode();
Tensor out(cpu_allocator(numa_node), DT_VARIANT, TensorShape({}));
Variant* out_v = &(out.scalar<Variant>()());
OP_REQUIRES_OK(ctx, UnaryOpVariant<Device>(
ctx, ZEROS_LIKE_VARIANT_UNARY_OP, v, out_v));
ctx->set_output(0, out);
} else {
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->forward_input_or_allocate_output(
{0}, 0, input.shape(), &out));
functor::SetZeroFunctor<Device, T> f;
f(d, out->flat<T>());
}
}
};
#define REGISTER_KERNEL(type, dev) \
REGISTER_KERNEL_BUILDER( \
Name("ZerosLike").Device(DEVICE_##dev).TypeConstraint<type>("T"), \
ZerosLikeOp<dev##Device, type>)
#define REGISTER_CPU(type) REGISTER_KERNEL(type, CPU)
TF_CALL_POD_STRING_TYPES(REGISTER_CPU);
REGISTER_CPU(Variant);
#undef REGISTER_CPU
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED)
REGISTER_KERNEL(bool, GPU);
REGISTER_KERNEL(Eigen::half, GPU);
REGISTER_KERNEL(float, GPU);
REGISTER_KERNEL(double, GPU);
REGISTER_KERNEL(int64_t, GPU);
REGISTER_KERNEL(complex64, GPU);
REGISTER_KERNEL(complex128, GPU);
#endif
REGISTER_KERNEL(bfloat16, GPU);
REGISTER_KERNEL(Variant, GPU);
#endif
#undef REGISTER_KERNEL
REGISTER_KERNEL_BUILDER(Name("ZerosLike")
.Device(DEVICE_DEFAULT)
.TypeConstraint<int32>("T")
.HostMemory("y"),
ZerosLikeOp<CPUDevice, int32>);
template <typename Device, typename T>
class OnesLikeOp : public OpKernel {
public:
explicit OnesLikeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(0);
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->forward_input_or_allocate_output(
{0}, 0, input.shape(), &out));
functor::SetOneFunctor<Device, T> f;
f(ctx->eigen_device<Device>(), out->flat<T>());
}
};
#define REGISTER_KERNEL(type, dev) \
REGISTER_KERNEL_BUILDER( \
Name("OnesLike").Device(DEVICE_##dev).TypeConstraint<type>("T"), \
OnesLikeOp<dev##Device, type>)
#define REGISTER_CPU(type) REGISTER_KERNEL(type, CPU)
TF_CALL_POD_TYPES(REGISTER_CPU);
#undef REGISTER_CPU
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED)
REGISTER_KERNEL(bool, GPU);
REGISTER_KERNEL(Eigen::half, GPU);
REGISTER_KERNEL(float, GPU);
REGISTER_KERNEL(double, GPU);
REGISTER_KERNEL(int64_t, GPU);
REGISTER_KERNEL(complex64, GPU);
REGISTER_KERNEL(complex128, GPU);
#endif
REGISTER_KERNEL(bfloat16, GPU);
#endif
#undef REGISTER_KERNEL
REGISTER_KERNEL_BUILDER(Name("OnesLike")
.Device(DEVICE_DEFAULT)
.TypeConstraint<int32>("T")
.HostMemory("y"),
OnesLikeOp<CPUDevice, int32>);
PlaceholderOp::PlaceholderOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("shape", &expected_shape_));
}
void PlaceholderOp::Compute(OpKernelContext* ctx) {
if (expected_shape_.dims() > 0) {
OP_REQUIRES(ctx, false,
errors::InvalidArgument(
"You must feed a value for placeholder tensor '", name(),
"' with dtype ", DataTypeString(output_type(0)),
" and shape ", expected_shape_.DebugString()));
} else {
OP_REQUIRES(ctx, false,
errors::InvalidArgument(
"You must feed a value for placeholder tensor '", name(),
"' with dtype ", DataTypeString(output_type(0))));
}
}
REGISTER_KERNEL_BUILDER(Name("Placeholder").Device(DEVICE_CPU), PlaceholderOp);
REGISTER_KERNEL_BUILDER(Name("PlaceholderV2").Device(DEVICE_CPU),
PlaceholderOp);
REGISTER_KERNEL_BUILDER(Name("Placeholder").Device(DEVICE_DEFAULT),
PlaceholderOp);
REGISTER_KERNEL_BUILDER(Name("PlaceholderV2").Device(DEVICE_DEFAULT),
PlaceholderOp);
} | #include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
class ConstantOpTest : public OpsTestBase {
protected:
void PersistentMemoryTrackingTest(bool on_gpu);
};
void ConstantOpTest::PersistentMemoryTrackingTest(bool on_gpu) {
DataType data_type = DT_INT32;
std::initializer_list<int64_t> dims = {2, 3, 4, 5};
Tensor tensor(data_type, TensorShape(dims));
for (int i = 0; i < 2 * 3 * 4 * 5; ++i) {
tensor.flat<int32>()(i) = i;
}
NodeDef const_node;
TF_ASSERT_OK(NodeDefBuilder("some_node", "Const")
.Attr("dtype", data_type)
.Attr("value", tensor)
.Finalize(&const_node));
string device_string = "CPU";
DeviceType device_type = DEVICE_CPU;
if (on_gpu) {
device_string = "GPU";
    device_type = DEVICE_GPU;
}
std::unique_ptr<Device> device(DeviceFactory::NewDevice(
device_string, {}, "/job:worker/replica:0/task:0"));
Status status;
std::unique_ptr<OpKernel> op(CreateOpKernel(device_type, device.get(),
cpu_allocator(), const_node,
TF_GRAPH_DEF_VERSION, &status));
TF_ASSERT_OK(status);
OpKernelContext::Params params;
params.device = device.get();
params.frame_iter = FrameAndIter(0, 0);
params.op_kernel = op.get();
params.track_allocations = true;
OpKernelContext ctx(¶ms);
op->Compute(&ctx);
TF_EXPECT_OK(ctx.status());
if (on_gpu) {
EXPECT_EQ(ctx.persistent_memory_allocated(), 512);
} else {
EXPECT_EQ(ctx.persistent_memory_allocated(), 480);
}
for (auto allocator_pair : ctx.ConsumeWrappedAllocators()) {
allocator_pair.second->GetRecordsAndUnRef();
}
}
TEST_F(ConstantOpTest, PersistentMemoryTracking) {
PersistentMemoryTrackingTest(false);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
PersistentMemoryTrackingTest(true);
#endif
}
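// Builds a graph of `num` scalar float constants; when `sequential` is true
// they are chained with control edges so they cannot execute in parallel.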
static Graph* ManyConsts(int num, bool sequential) {
Graph* g = new Graph(OpRegistry::Global());
Node* prev = nullptr;
for (int i = 0; i < num; ++i) {
Tensor c(DT_FLOAT, TensorShape({}));
c.scalar<float>()() = i;
Node* curr = test::graph::Constant(g, c);
if (sequential && prev != nullptr) {
g->AddControlEdge(prev, curr);
}
prev = curr;
}
return g;
}
static void BM_ManyConsts_Parallel(::testing::benchmark::State& state) {
const int num = state.range(0);
test::Benchmark("cpu", ManyConsts(num, false ),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num);
}
BENCHMARK(BM_ManyConsts_Parallel)->Range(1, 1 << 10);
static void BM_ManyConsts_Sequential(::testing::benchmark::State& state) {
const int num = state.range(0);
test::Benchmark("cpu", ManyConsts(num, true ),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num);
}
BENCHMARK(BM_ManyConsts_Sequential)->Range(1, 1 << 10);
} |
1,497 | cpp | tensorflow/tensorflow | tensor_flag_utils | tensorflow/core/kernels/tensor_flag_utils.cc | tensorflow/core/kernels/tensor_flag_utils_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_TENSOR_FLAG_UTILS_H_
#define TENSORFLOW_CORE_KERNELS_TENSOR_FLAG_UTILS_H_
#include <utility>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace tensor_flag_utils {
template <typename Tindices>
std::vector<Tindices> ParseRowStartIndices(
const tensorflow::Tensor& tensor,
const Tindices num_nonzero_entries_in_sparse_mat);
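// Config validators: `config` must be either a scalar fraction in (0, 1] or a
// rank-2 float matrix whose leading columns hold non-negative thresholds and
// whose last column holds fractions in (0, 1] (three columns for the sparse
// matrix variant, two for the scalar quantity variant).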
Status ValidateSparseMatrixShardingConfig(const Tensor& config);
Status ValidateScalarQuantityShardingConfig(const Tensor& config);
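// Scans `config_mat` top to bottom and returns the last-column value of the
// first row whose threshold column(s) are all <= `key`; falls back to the
// value in the final row when no earlier row matches.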
template <typename MatrixType, typename K>
MatrixType FindConfigValueForKey(
const typename TTypes<MatrixType>::ConstMatrix& config_mat,
const std::pair<K, K>& key);
template <typename MatrixType, typename K>
MatrixType FindConfigValueForKey(
const typename TTypes<MatrixType>::ConstMatrix& config_mat, const K key);
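// Bucketing helpers: GetLinearBucket rounds `value` down to the first element
// of its fixed-width bucket of size `bucket_size`; GetPowerBucket does the
// same for buckets whose widths grow as powers of `bucket_size`.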
template <typename Tindices>
Tindices GetLinearBucket(const Tindices value, const Tindices bucket_size);
template <typename Tindices>
Tindices GetPowerBucket(const Tindices value, const Tindices bucket_size);
}
}
#endif
#include "tensorflow/core/kernels/tensor_flag_utils.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
namespace tensor_flag_utils {
Status ValidateSparseMatrixShardingConfig(const Tensor& config) {
if (TensorShapeUtils::IsScalar(config.shape())) {
const float scalar_config = config.template scalar<float>()();
if (0 < scalar_config && scalar_config <= 1.0) {
return absl::OkStatus();
}
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Expected config to be in range (0, 1] but instead found ",
scalar_config));
}
if (!TensorShapeUtils::IsMatrix(config.shape())) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("Expected config to be either scalar or matrix "
"but instead found tensor of rank ",
config.dims()));
}
if (config.dim_size(1) != 3) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat(
"Expected config matrix to have dim(1) = 3 but instead found ",
config.dim_size(1)));
}
auto config_matrix = config.matrix<float>();
for (int i = 0; i < config.dim_size(0); ++i) {
if (0 > config_matrix(i, 0)) {
return errors::InvalidArgument(
"First column of fraction_rows_per_thread_config "
"should "
"have non-negative values but found ",
config_matrix(i, 0), " in row ", i);
}
if (0 > config_matrix(i, 1)) {
return errors::InvalidArgument(
"Second column of fraction_rows_per_thread_config "
"should "
"have non-negative values but found ",
config_matrix(i, 1), " in row ", i);
}
if (!(0 < config_matrix(i, 2) && config_matrix(i, 2) <= 1)) {
return errors::InvalidArgument(
"Last column of fraction_rows_per_thread_config should "
"have values in the range (0, 1] but found ",
config_matrix(i, 2), " in row ", i);
}
}
return absl::OkStatus();
}
template <typename MatrixType, typename K>
MatrixType FindConfigValueForKey(
const typename TTypes<MatrixType>::ConstMatrix& config_mat,
const std::pair<K, K>& key) {
const int last_row_index = config_mat.dimension(0) - 1;
for (int i = 0; i < last_row_index; ++i) {
if (key.first >= config_mat(i, 0) && key.second >= config_mat(i, 1)) {
return config_mat(i, 2);
}
}
return config_mat(last_row_index, 2);
}
Status ValidateScalarQuantityShardingConfig(const Tensor& config) {
if (TensorShapeUtils::IsScalar(config.shape())) {
const float scalar_config = config.template scalar<float>()();
if (0 < scalar_config && scalar_config <= 1.0) {
return absl::OkStatus();
}
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Expected config to be in range (0, 1] but instead found ",
scalar_config));
}
if (!TensorShapeUtils::IsMatrix(config.shape())) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("Expected config to be either scalar or matrix "
"but instead found tensor of rank ",
config.dims()));
}
if (config.dim_size(1) != 2) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat(
"Expected config matrix to have dim(1) = 2 but instead found ",
config.dim_size(1)));
}
auto config_matrix = config.matrix<float>();
for (int i = 0; i < config.dim_size(0); ++i) {
if (0 > config_matrix(i, 0)) {
return errors::InvalidArgument(
"First column of fraction_rows_per_thread_config "
"should "
"have non-negative values but found ",
config_matrix(i, 0), " in row ", i);
}
if (!(0 < config_matrix(i, 1) && config_matrix(i, 1) <= 1)) {
return errors::InvalidArgument(
"Last column of fraction_rows_per_thread_config should "
"have values in the range (0, 1] but found ",
config_matrix(i, 1), " in row ", i);
}
}
return absl::OkStatus();
}
template <typename MatrixType, typename K>
MatrixType FindConfigValueForKey(
const typename TTypes<MatrixType>::ConstMatrix& config_mat, const K key) {
const int last_row_index = config_mat.dimension(0) - 1;
for (int i = 0; i < last_row_index; ++i) {
if (key >= config_mat(i, 0)) {
return config_mat(i, 1);
}
}
return config_mat(last_row_index, 1);
}
template <typename Tindices>
Tindices GetLinearBucket(const Tindices value, const Tindices bucket_size) {
const Tindices next_multiple_of_bucket_size =
(value + bucket_size - 1) / bucket_size * bucket_size;
return next_multiple_of_bucket_size - (bucket_size - 1);
}
template <typename Tindices>
Tindices GetPowerBucket(const Tindices value, const Tindices bucket_size) {
if (bucket_size == 1) {
return 1;
}
return std::pow(bucket_size, std::floor(std::log(bucket_size * (value - 1)) /
std::log(bucket_size)) -
1) +
1;
}
#define REGISTER_SPARSE_UTIL_FUNCTIONS(TypeIndex) \
template float FindConfigValueForKey<float, TypeIndex>( \
const TTypes<float>::ConstMatrix& config_mat, \
const std::pair<TypeIndex, TypeIndex>& key); \
template float FindConfigValueForKey<float, TypeIndex>( \
const TTypes<float>::ConstMatrix& config_mat, const TypeIndex key); \
template int64 FindConfigValueForKey<int64, TypeIndex>( \
const TTypes<int64_t>::ConstMatrix& config_mat, const TypeIndex key);
REGISTER_SPARSE_UTIL_FUNCTIONS(int32);
REGISTER_SPARSE_UTIL_FUNCTIONS(int64);
REGISTER_SPARSE_UTIL_FUNCTIONS(uint8);
REGISTER_SPARSE_UTIL_FUNCTIONS(uint16);
REGISTER_SPARSE_UTIL_FUNCTIONS(uint32);
REGISTER_SPARSE_UTIL_FUNCTIONS(uint64);
template int32 GetLinearBucket(const int32 value, const int32 bucket_size);
template int64 GetLinearBucket(const int64 value, const int64 bucket_size);
template int32 GetPowerBucket(const int32 value, const int32 bucket_size);
template int64 GetPowerBucket(const int64 value, const int64 bucket_size);
}
} | #include "tensorflow/core/kernels/tensor_flag_utils.h"
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/test.h"
namespace {
using ::int64_t;
using tensorflow::DataType;
using tensorflow::int32;
using tensorflow::Tensor;
using tensorflow::TTypes;
using tensorflow::error::INVALID_ARGUMENT;
using tensorflow::tensor_flag_utils::FindConfigValueForKey;
using tensorflow::tensor_flag_utils::GetLinearBucket;
using tensorflow::tensor_flag_utils::GetPowerBucket;
using tensorflow::tensor_flag_utils::ValidateScalarQuantityShardingConfig;
using tensorflow::tensor_flag_utils::ValidateSparseMatrixShardingConfig;
TEST(SparseUtilsTest, ValidateSparseMatrixShardingConfig) {
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 0.7;
EXPECT_TRUE(ValidateSparseMatrixShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 1.0;
EXPECT_TRUE(ValidateSparseMatrixShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {1, 1});
int indx = 0;
for (const float v : {60.0}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {1, 2});
int indx = 0;
for (const float v : {
60.0,
50.0,
}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {1, 3});
int indx = 0;
for (const float v : {30.0, 20.0, 1.0}) {
t.flat<float>()(indx++) = v;
}
EXPECT_TRUE(ValidateSparseMatrixShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {2, 3});
int indx = 0;
for (const float v : {60.0, 50.0, 0.41, 30.0, 20.0, 0.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_TRUE(ValidateSparseMatrixShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {2, 3});
int indx = 0;
for (const float v : {60.0, 40.0, 0.41, 30.0, 20.0, 10.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {2, 3});
int indx = 0;
for (const float v : {60.0, 40.0, 0.41, 30.0, 20.0, -0.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {2, 3});
int indx = 0;
for (const float v : {60.0, -40.0, 0.41, 30.0, 20.0, 0.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = -0.5;
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 0;
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 1.2;
EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code());
}
}
TEST(SparseUtilsTest, ValidateScalarQuantityShardingConfig) {
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 0.7;
EXPECT_TRUE(ValidateScalarQuantityShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 1.0;
EXPECT_TRUE(ValidateScalarQuantityShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 1.2;
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {1, 1});
int indx = 0;
for (const float v : {60.0}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {1, 2});
int indx = 0;
for (const float v : {
60.0,
50.0,
}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {1, 3});
int indx = 0;
for (const float v : {30.0, 20.0, 1.0}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {2, 2});
int indx = 0;
for (const float v : {60.0, 0.41, 30.0, 0.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_TRUE(ValidateScalarQuantityShardingConfig(t).ok());
}
{
Tensor t(DataType::DT_FLOAT, {2, 2});
int indx = 0;
for (const float v : {60.0, 0.41, 30.0, 10.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {2, 2});
int indx = 0;
for (const float v : {60.0, 0.41, 30.0, -0.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {2, 2});
int indx = 0;
for (const float v : {-40.0, 0.41, 20.0, 0.7}) {
t.flat<float>()(indx++) = v;
}
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = -0.5;
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 0;
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
{
Tensor t(DataType::DT_FLOAT, {});
t.scalar<float>()() = 1.2;
EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code());
}
}
TEST(SparseUtils, FindConfigValueForKey) {
{
float data[] = {60.0, 50.0, 0.41, 30.0, 20.0, 0.1, 0, 0, 0.7};
TTypes<float>::ConstMatrix config_mat(data, 3, 3);
auto val = FindConfigValueForKey<float, int32>(config_mat, {70, 40});
EXPECT_FLOAT_EQ(0.1, val);
val = FindConfigValueForKey<float, int32>(config_mat, {60, 50});
EXPECT_FLOAT_EQ(0.41, val);
val = FindConfigValueForKey<float, int32>(config_mat, {60, 60});
EXPECT_FLOAT_EQ(0.41, val);
val = FindConfigValueForKey<float, int32>(config_mat, {60, 40});
EXPECT_FLOAT_EQ(0.1, val);
val = FindConfigValueForKey<float, int32>(config_mat, {50, 60});
EXPECT_FLOAT_EQ(0.1, val);
val = FindConfigValueForKey<float, int32>(config_mat, {20, 30});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int32>(config_mat, {30, 10});
EXPECT_FLOAT_EQ(0.7, val);
}
{
float data[] = {0, 0, 0.7};
TTypes<float>::ConstMatrix config_mat(data, 1, 3);
auto val = FindConfigValueForKey<float, int64_t>(config_mat, {70, 40});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int64_t>(config_mat, {60, 50});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int64_t>(config_mat, {60, 60});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int64_t>(config_mat, {60, 40});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int64_t>(config_mat, {50, 60});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int64_t>(config_mat, {20, 30});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int64_t>(config_mat, {30, 10});
EXPECT_FLOAT_EQ(0.7, val);
}
{
float data[] = {60.0, 50.0, 0.41, 0, 0, 0.7};
TTypes<float>::ConstMatrix config_mat(data, 2, 3);
auto val = FindConfigValueForKey<float, int32>(config_mat, {70, 40});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int32>(config_mat, {60, 50});
EXPECT_FLOAT_EQ(0.41, val);
val = FindConfigValueForKey<float, int32>(config_mat, {60, 60});
EXPECT_FLOAT_EQ(0.41, val);
val = FindConfigValueForKey<float, int32>(config_mat, {60, 40});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int32>(config_mat, {50, 60});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int32>(config_mat, {20, 30});
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int32>(config_mat, {30, 10});
EXPECT_FLOAT_EQ(0.7, val);
}
{
float data[] = {60.0, 0.41, 50.0, 0.14, 0, 0.7};
TTypes<float>::ConstMatrix config_mat(data, 3, 2);
auto val = FindConfigValueForKey<float, int32>(config_mat, 70);
EXPECT_FLOAT_EQ(0.41, val);
val = FindConfigValueForKey<float, int32>(config_mat, 60);
EXPECT_FLOAT_EQ(0.41, val);
val = FindConfigValueForKey<float, int32>(config_mat, 55);
EXPECT_FLOAT_EQ(0.14, val);
val = FindConfigValueForKey<float, int32>(config_mat, 50);
EXPECT_FLOAT_EQ(0.14, val);
val = FindConfigValueForKey<float, int32>(config_mat, 20);
EXPECT_FLOAT_EQ(0.7, val);
val = FindConfigValueForKey<float, int32>(config_mat, 30);
EXPECT_FLOAT_EQ(0.7, val);
}
}
TEST(SparseUtils, GetLinearBucket) {
EXPECT_EQ(11, GetLinearBucket(11, 5));
EXPECT_EQ(11, GetLinearBucket(12, 5));
EXPECT_EQ(1, GetLinearBucket(int64_t{4}, int64_t{5}));
}
TEST(SparseUtils, GetPowerBucket) {
EXPECT_EQ(6, GetPowerBucket(11, 5));
EXPECT_EQ(6, GetPowerBucket(12, 5));
EXPECT_EQ(1332, GetPowerBucket(1335, 11));
EXPECT_EQ(5, GetPowerBucket(int64_t{5}, int64_t{4}));
EXPECT_EQ(1, GetPowerBucket(int64_t{4}, int64_t{1}));
}
} |
1,498 | cpp | tensorflow/tensorflow | shape_ops | tensorflow/core/kernels/shape_ops.cc | tensorflow/core/kernels/shape_ops_test.cc |
#ifndef TENSORFLOW_CORE_KERNELS_SHAPE_OPS_H_
#define TENSORFLOW_CORE_KERNELS_SHAPE_OPS_H_
#include <limits>
#include <unordered_set>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/variant_op_registry.h"
namespace tensorflow {
namespace shape_op_helpers {
inline Status GetShape(OpKernelContext* ctx, int input_index,
TensorShape* shape) {
*shape = ctx->input(input_index).shape();
return absl::OkStatus();
}
}
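// Emits the shape of its input as a 1-D tensor of OutType (int32 or int64),
// rejecting dimensions that would overflow a 32-bit output.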
template <typename OutType>
class ShapeOp : public OpKernel {
public:
explicit ShapeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
TensorShape shape;
OP_REQUIRES_OK(ctx, shape_op_helpers::GetShape(ctx, 0, &shape));
const int rank = shape.dims();
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({rank}), &out));
auto vec = out->vec<OutType>();
for (int i = 0; i < rank; ++i) {
int64_t dim_size = shape.dim_size(i);
if (out->dtype() == DT_INT32) {
OP_REQUIRES(
ctx, FastBoundsCheck(dim_size, std::numeric_limits<int32>::max()),
errors::InvalidArgument("Shape output type is 32-bit ", " but dim ",
i, " is ", dim_size));
}
vec(i) = static_cast<OutType>(dim_size);
}
}
bool IsExpensive() override { return false; }
};
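// Like ShapeOp, but emits one shape vector per input for N inputs.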
template <typename OutType>
class ShapeNOp : public OpKernel {
public:
explicit ShapeNOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
for (int i = 0; i < ctx->num_inputs(); ++i) {
TensorShape shape;
OP_REQUIRES_OK(ctx, shape_op_helpers::GetShape(ctx, i, &shape));
const int dims = shape.dims();
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(i, {dims}, &out));
auto vec = out->vec<OutType>();
for (int j = 0; j < dims; ++j) {
int64_t dim_size = shape.dim_size(j);
if (out->dtype() == DT_INT32) {
OP_REQUIRES(
ctx, FastBoundsCheck(dim_size, std::numeric_limits<int32>::max()),
errors::InvalidArgument("ShapeN output type is 32-bit but shape ",
i, " dim ", j, " is ", dim_size));
}
vec(j) = static_cast<OutType>(dim_size);
}
}
}
bool IsExpensive() override { return false; }
};
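// Emits the rank (number of dimensions) of its input as a scalar int32.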
class RankOp : public OpKernel {
public:
explicit RankOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
TensorShape shape;
OP_REQUIRES_OK(ctx, shape_op_helpers::GetShape(ctx, 0, &shape));
const int rank = shape.dims();
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &out));
out->scalar<int32>()() = rank;
}
bool IsExpensive() override { return false; }
};
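// Emits the total element count of its input as a scalar of OutType, checking
// for overflow when OutType is int32.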
template <typename OutType>
class SizeOp : public OpKernel {
public:
explicit SizeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
TensorShape shape;
OP_REQUIRES_OK(ctx, shape_op_helpers::GetShape(ctx, 0, &shape));
const int64_t size = shape.num_elements();
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &out));
if (out->dtype() == DT_INT32) {
OP_REQUIRES(
ctx, FastBoundsCheck(size, std::numeric_limits<int32>::max()),
errors::InvalidArgument("Number of elements was larger than "
"representable by 32-bit output type"));
}
out->scalar<OutType>()() = static_cast<OutType>(size);
}
bool IsExpensive() override { return false; }
};
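// Inserts a size-1 dimension at the index given by the 'dim' input (negative
// values count from the end); the output aliases the input buffer via CopyFrom.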
template <typename Tdim>
class ExpandDimsOp : public OpKernel {
public:
explicit ExpandDimsOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& input_t = ctx->input(0);
OP_REQUIRES(ctx, input_t.dtype() != DT_VARIANT,
errors::InvalidArgument("ExpandDims on Variant not supported"));
const Tensor& dim_t = ctx->input(1);
OP_REQUIRES(
ctx, (dim_t.NumElements() == 1),
errors::InvalidArgument("'dim' must be a tensor with a single value"));
DCHECK_EQ(dim_t.dtype(), DataTypeToEnum<Tdim>::v());
Tdim dim = *static_cast<const Tdim*>(DMAHelper::base(&dim_t));
const TensorShape& input_shape = input_t.shape();
int input_dims = input_shape.dims();
OP_REQUIRES(ctx, dim >= -1 - input_dims && dim <= input_dims,
errors::InvalidArgument("Tried to expand dim index ", dim,
" for tensor with ", input_dims,
" dimensions."));
if (dim < 0) {
dim = std::min<Tdim>(dim + input_dims + 1, input_dims);
}
absl::InlinedVector<int64_t, 8> output_shape_vec(input_dims + 1);
for (int64_t i = 0; i < dim; ++i) {
output_shape_vec[i] = input_shape.dim_size(i);
}
output_shape_vec[dim] = 1;
for (int64_t i = dim + 1; i < input_dims + 1; ++i) {
output_shape_vec[i] = input_shape.dim_size(i - 1);
}
TensorShape output_shape(output_shape_vec);
Tensor output_t;
if (!output_t.CopyFrom(input_t, output_shape)) {
ctx->SetStatus(
errors::Internal("Could not expand dimension with input shape ",
ctx->input(0).shape().DebugString(),
" and output shape ", output_shape.DebugString()));
}
ctx->set_output(0, std::move(output_t));
}
bool IsExpensive() override { return false; }
};
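// Removes size-1 dimensions; if 'squeeze_dims' is non-empty, only those axes are
// removed and each must have size 1.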
class SqueezeOp : public OpKernel {
public:
explicit SqueezeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
std::vector<int32> squeeze_dims;
OP_REQUIRES_OK(ctx, ctx->GetAttr("squeeze_dims", &squeeze_dims));
squeeze_dims_.insert(squeeze_dims.begin(), squeeze_dims.end());
}
void Compute(OpKernelContext* ctx) override {
OP_REQUIRES(ctx, ctx->input(0).dtype() != DT_VARIANT,
errors::InvalidArgument("Squeeze on Variant not supported"));
auto existing_dims = ctx->input(0).shape().dim_sizes();
const int existing_dims_size = static_cast<int>(existing_dims.size());
std::vector<int64_t> new_shape;
std::unordered_set<int32> wrapped_squeeze_dims;
wrapped_squeeze_dims.reserve(squeeze_dims_.size());
for (int32_t dim : squeeze_dims_) {
OP_REQUIRES(
ctx, (dim >= -ctx->input(0).dims() && dim < ctx->input(0).dims()),
errors::InvalidArgument("Tried to squeeze dim index ", dim,
" for tensor with ", ctx->input(0).dims(),
" dimensions."));
if (dim < 0) {
dim = existing_dims_size + dim;
}
wrapped_squeeze_dims.insert(dim);
}
for (int i = 0; i < existing_dims_size; ++i) {
auto existing_dim = existing_dims[i];
if (!wrapped_squeeze_dims.empty()) {
if (wrapped_squeeze_dims.count(i) > 0) {
OP_REQUIRES(ctx, existing_dim == 1,
errors::InvalidArgument(
"Can not squeeze dim[", i,
"], expected a dimension of 1, got ", existing_dim));
} else {
new_shape.push_back(existing_dim);
}
} else {
if (existing_dim != 1) {
new_shape.push_back(existing_dim);
}
}
}
const TensorShape output_shape(new_shape);
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, {0}, &output));
if (!output->CopyFrom(ctx->input(0), output_shape)) {
ctx->SetStatus(errors::Internal("Could not squeeze input with shape ",
ctx->input(0).shape().DebugString(),
" and output shape ",
output_shape.DebugString()));
}
}
bool IsExpensive() override { return false; }
private:
std::unordered_set<int32> squeeze_dims_;
};
}
#endif
#include "tensorflow/core/kernels/shape_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/register_types.h"
namespace tensorflow {
REGISTER_KERNEL_BUILDER(Name("Shape")
.Device(DEVICE_CPU)
.HostMemory("output")
.TypeConstraint<int32>("out_type"),
ShapeOp<int32>);
REGISTER_KERNEL_BUILDER(Name("Shape")
.Device(DEVICE_CPU)
.HostMemory("output")
.TypeConstraint<int64_t>("out_type"),
ShapeOp<int64_t>);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("Shape") \
.Device(DEVICE_GPU) \
.HostMemory("output") \
.TypeConstraint<int32>("out_type") \
.TypeConstraint<type>("T"), \
ShapeOp<int32>); \
REGISTER_KERNEL_BUILDER(Name("Shape") \
.Device(DEVICE_GPU) \
.HostMemory("output") \
.TypeConstraint<int64_t>("out_type") \
.TypeConstraint<type>("T"), \
ShapeOp<int64_t>);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL);
TF_CALL_bool(REGISTER_GPU_KERNEL);
TF_CALL_variant(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
REGISTER_KERNEL_BUILDER(Name("Shape")
.Device(DEVICE_GPU)
.HostMemory("input")
.HostMemory("output")
.TypeConstraint<int32>("T")
.TypeConstraint<int32>("out_type"),
ShapeOp<int32>);
REGISTER_KERNEL_BUILDER(Name("Shape")
.Device(DEVICE_GPU)
.HostMemory("input")
.HostMemory("output")
.TypeConstraint<int32>("T")
.TypeConstraint<int64_t>("out_type"),
ShapeOp<int64_t>);
#endif
#define REGISTER_DEFAULT_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("Shape") \
.Device(DEVICE_DEFAULT) \
.HostMemory("output") \
.TypeConstraint<int32>("out_type") \
.TypeConstraint<type>("T"), \
ShapeOp<int32>); \
REGISTER_KERNEL_BUILDER(Name("Shape") \
.Device(DEVICE_DEFAULT) \
.HostMemory("output") \
.TypeConstraint<int64_t>("out_type") \
.TypeConstraint<type>("T"), \
ShapeOp<int64_t>);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL);
TF_CALL_bool(REGISTER_DEFAULT_KERNEL);
TF_CALL_variant(REGISTER_DEFAULT_KERNEL);
#undef REGISTER_DEFAULT_KERNEL
REGISTER_KERNEL_BUILDER(Name("Shape")
.Device(DEVICE_DEFAULT)
.HostMemory("input")
.HostMemory("output")
.TypeConstraint<int32>("T")
.TypeConstraint<int32>("out_type"),
ShapeOp<int32>);
REGISTER_KERNEL_BUILDER(Name("Shape")
.Device(DEVICE_DEFAULT)
.HostMemory("input")
.HostMemory("output")
.TypeConstraint<int32>("T")
.TypeConstraint<int64_t>("out_type"),
ShapeOp<int64_t>);
REGISTER_KERNEL_BUILDER(Name("ShapeN")
.Device(DEVICE_CPU)
.HostMemory("output")
.TypeConstraint<int32>("out_type"),
ShapeNOp<int32>);
REGISTER_KERNEL_BUILDER(Name("ShapeN")
.Device(DEVICE_CPU)
.HostMemory("output")
.TypeConstraint<int64_t>("out_type"),
ShapeNOp<int64_t>);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("ShapeN") \
.Device(DEVICE_GPU) \
.HostMemory("output") \
.TypeConstraint<int32>("out_type") \
.TypeConstraint<type>("T"), \
ShapeNOp<int32>); \
REGISTER_KERNEL_BUILDER(Name("ShapeN") \
.Device(DEVICE_GPU) \
.HostMemory("output") \
.TypeConstraint<int64_t>("out_type") \
.TypeConstraint<type>("T"), \
ShapeNOp<int64_t>)
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL);
TF_CALL_bool(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
REGISTER_KERNEL_BUILDER(Name("ShapeN")
.Device(DEVICE_GPU)
.HostMemory("input")
.HostMemory("output")
.TypeConstraint<int32>("T")
.TypeConstraint<int32>("out_type"),
ShapeNOp<int32>);
REGISTER_KERNEL_BUILDER(Name("ShapeN")
.Device(DEVICE_GPU)
.HostMemory("input")
.HostMemory("output")
.TypeConstraint<int32>("T")
.TypeConstraint<int64_t>("out_type"),
ShapeNOp<int64_t>);
#endif
#define REGISTER_DEFAULT_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("ShapeN") \
.Device(DEVICE_DEFAULT) \
.HostMemory("output") \
.TypeConstraint<int32>("out_type") \
.TypeConstraint<type>("T"), \
ShapeNOp<int32>); \
REGISTER_KERNEL_BUILDER(Name("ShapeN") \
.Device(DEVICE_DEFAULT) \
.HostMemory("output") \
.TypeConstraint<int64_t>("out_type") \
.TypeConstraint<type>("T"), \
ShapeNOp<int64_t>)
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL);
TF_CALL_bool(REGISTER_DEFAULT_KERNEL);
#undef REGISTER_DEFAULT_KERNEL
REGISTER_KERNEL_BUILDER(Name("ShapeN")
.Device(DEVICE_DEFAULT)
.HostMemory("input")
.HostMemory("output")
.TypeConstraint<int32>("T")
.TypeConstraint<int32>("out_type"),
ShapeNOp<int32>);
REGISTER_KERNEL_BUILDER(Name("ShapeN")
.Device(DEVICE_DEFAULT)
.HostMemory("input")
.HostMemory("output")
.TypeConstraint<int32>("T")
.TypeConstraint<int64_t>("out_type"),
ShapeNOp<int64_t>);
REGISTER_KERNEL_BUILDER(Name("Rank").Device(DEVICE_CPU).HostMemory("output"),
RankOp);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("Rank") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.HostMemory("output"), \
RankOp);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL);
TF_CALL_variant(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
REGISTER_KERNEL_BUILDER(Name("Rank")
.Device(DEVICE_GPU)
.TypeConstraint<int32>("T")
.HostMemory("input")
.HostMemory("output"),
RankOp);
REGISTER_KERNEL_BUILDER(Name("Rank")
.Device(DEVICE_GPU)
.TypeConstraint<bool>("T")
.HostMemory("input")
.HostMemory("output"),
RankOp);
#endif
#define REGISTER_DEFAULT_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("Rank") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<type>("T") \
.HostMemory("output"), \
RankOp);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL);
TF_CALL_variant(REGISTER_DEFAULT_KERNEL);
#undef REGISTER_DEFAULT_KERNEL
REGISTER_KERNEL_BUILDER(Name("Rank")
.Device(DEVICE_DEFAULT)
.TypeConstraint<int32>("T")
.HostMemory("input")
.HostMemory("output"),
RankOp);
REGISTER_KERNEL_BUILDER(Name("Rank")
.Device(DEVICE_DEFAULT)
.TypeConstraint<bool>("T")
.HostMemory("input")
.HostMemory("output"),
RankOp);
REGISTER_KERNEL_BUILDER(Name("Size")
.Device(DEVICE_CPU)
.HostMemory("output")
.TypeConstraint<int32>("out_type"),
SizeOp<int32>);
REGISTER_KERNEL_BUILDER(Name("Size")
.Device(DEVICE_CPU)
.HostMemory("output")
.TypeConstraint<int64_t>("out_type"),
SizeOp<int64_t>);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("Size") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("out_type") \
.HostMemory("output"), \
SizeOp<int32>); \
REGISTER_KERNEL_BUILDER(Name("Size") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("out_type") \
.HostMemory("output"), \
SizeOp<int64_t>);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL);
TF_CALL_bool(REGISTER_GPU_KERNEL);
TF_CALL_variant(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
REGISTER_KERNEL_BUILDER(Name("Size")
.Device(DEVICE_GPU)
.TypeConstraint<int32>("T")
.TypeConstraint<int32>("out_type")
.HostMemory("input")
.HostMemory("output"),
SizeOp<int32>);
REGISTER_KERNEL_BUILDER(Name("Size")
.Device(DEVICE_GPU)
.TypeConstraint<int32>("T")
.TypeConstraint<int64_t>("out_type")
.HostMemory("input")
.HostMemory("output"),
SizeOp<int64_t>);
#endif
#define REGISTER_DEFAULT_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("Size") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("out_type") \
.HostMemory("output"), \
SizeOp<int32>); \
REGISTER_KERNEL_BUILDER(Name("Size") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("out_type") \
.HostMemory("output"), \
SizeOp<int64_t>);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL);
TF_CALL_bool(REGISTER_DEFAULT_KERNEL);
TF_CALL_variant(REGISTER_DEFAULT_KERNEL);
#undef REGISTER_DEFAULT_KERNEL
REGISTER_KERNEL_BUILDER(Name("Size")
.Device(DEVICE_DEFAULT)
.TypeConstraint<int32>("T")
.TypeConstraint<int32>("out_type")
.HostMemory("input")
.HostMemory("output"),
SizeOp<int32>);
REGISTER_KERNEL_BUILDER(Name("Size")
.Device(DEVICE_DEFAULT)
.TypeConstraint<int32>("T")
.TypeConstraint<int64_t>("out_type")
.HostMemory("input")
.HostMemory("output"),
SizeOp<int64_t>);
REGISTER_KERNEL_BUILDER(Name("ExpandDims")
.Device(DEVICE_CPU)
.HostMemory("dim")
.TypeConstraint<int32>("Tdim"),
ExpandDimsOp<int32>);
REGISTER_KERNEL_BUILDER(Name("ExpandDims")
.Device(DEVICE_CPU)
.HostMemory("dim")
.TypeConstraint<int64_t>("Tdim"),
ExpandDimsOp<int64_t>);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("ExpandDims") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("Tdim") \
.HostMemory("dim"), \
ExpandDimsOp<int32>); \
REGISTER_KERNEL_BUILDER(Name("ExpandDims") \
.Device(DEVICE_GPU) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("Tdim") \
.HostMemory("dim"), \
ExpandDimsOp<int64_t>);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL);
TF_CALL_bool(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
REGISTER_KERNEL_BUILDER(Name("ExpandDims")
.Device(DEVICE_GPU)
.TypeConstraint<int32>("T")
.TypeConstraint<int32>("Tdim")
.HostMemory("input")
.HostMemory("dim")
.HostMemory("output"),
ExpandDimsOp<int32>);
REGISTER_KERNEL_BUILDER(Name("ExpandDims")
.Device(DEVICE_GPU)
.TypeConstraint<int32>("T")
.TypeConstraint<int64_t>("Tdim")
.HostMemory("input")
.HostMemory("dim")
.HostMemory("output"),
ExpandDimsOp<int64_t>);
#endif
#define REGISTER_DEFAULT_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("ExpandDims") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<type>("T") \
.TypeConstraint<int32>("Tdim") \
.HostMemory("dim"), \
ExpandDimsOp<int32>); \
REGISTER_KERNEL_BUILDER(Name("ExpandDims") \
.Device(DEVICE_DEFAULT) \
.TypeConstraint<type>("T") \
.TypeConstraint<int64_t>("Tdim") \
.HostMemory("dim"), \
ExpandDimsOp<int64_t>);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL);
TF_CALL_bool(REGISTER_DEFAULT_KERNEL);
#undef REGISTER_DEFAULT_KERNEL
REGISTER_KERNEL_BUILDER(Name("ExpandDims")
.Device(DEVICE_DEFAULT)
.TypeConstraint<int32>("T")
.TypeConstraint<int32>("Tdim")
.HostMemory("input")
.HostMemory("dim")
.HostMemory("output"),
ExpandDimsOp<int32>);
REGISTER_KERNEL_BUILDER(Name("ExpandDims")
.Device(DEVICE_DEFAULT)
.TypeConstraint<int32>("T")
.TypeConstraint<int64_t>("Tdim")
.HostMemory("input")
.HostMemory("dim")
.HostMemory("output"),
ExpandDimsOp<int64_t>);
REGISTER_KERNEL_BUILDER(Name("Squeeze").Device(DEVICE_CPU), SqueezeOp);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("Squeeze").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
SqueezeOp);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_GPU_KERNEL);
TF_CALL_bool(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
REGISTER_KERNEL_BUILDER(Name("Squeeze")
.Device(DEVICE_GPU)
.TypeConstraint<int32>("T")
.HostMemory("input")
.HostMemory("output"),
SqueezeOp);
#endif
#define REGISTER_DEFAULT_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
Name("Squeeze").Device(DEVICE_DEFAULT).TypeConstraint<type>("T"), \
SqueezeOp);
TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL);
TF_CALL_bool(REGISTER_DEFAULT_KERNEL);
#undef REGISTER_DEFAULT_KERNEL
REGISTER_KERNEL_BUILDER(Name("Squeeze")
.Device(DEVICE_DEFAULT)
.TypeConstraint<int32>("T")
.HostMemory("input")
.HostMemory("output"),
SqueezeOp);
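// EnsureShape verifies at run time that the input's shape is compatible with the
// expected 'shape' attribute and passes the input through unchanged.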
class EnsureShapeOp : public OpKernel {
public:
explicit EnsureShapeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_ |
#include <functional>
#include <memory>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
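// Benchmarks a single ExpandDims node on a 1x1x1x1 int32 input using the
// single-threaded executor.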
static void BM_ExpandDims(::testing::benchmark::State& state) {
Graph* g = new Graph(OpRegistry::Global());
Tensor input(DT_INT32, TensorShape({1, 1, 1, 1}));
input.flat<int32>()(0) = 10;
Tensor axis(DT_INT32, TensorShape({}));
axis.flat<int32>()(0) = 2;
Node* node;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "ExpandDims")
.Input(test::graph::Constant(g, input))
.Input(test::graph::Constant(g, axis))
.Attr("T", DT_INT32)
.Attr("Tdim", DT_INT32)
.Finalize(g, &node));
FixupSourceAndSinkEdges(g);
test::Benchmark("cpu", g, nullptr, nullptr, nullptr,
"SINGLE_THREADED_EXECUTOR", false)
.Run(state);
}
BENCHMARK(BM_ExpandDims)->UseRealTime();
}
} |
1,499 | cpp | tensorflow/tensorflow | identity_n_op | tensorflow/core/kernels/identity_n_op.cc | tensorflow/core/kernels/identity_n_op_test.cc |
#ifndef TENSORFLOW_CORE_KERNELS_IDENTITY_N_OP_H_
#define TENSORFLOW_CORE_KERNELS_IDENTITY_N_OP_H_
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
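// Forwards each of its N inputs to the corresponding output unchanged; also
// records TPU variable-distribution timing for the TPU execute staging node.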
class IdentityNOp : public OpKernel {
public:
explicit IdentityNOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
OpInputList input;
OpOutputList output;
OP_REQUIRES_OK(context, context->input_list("input", &input));
OP_REQUIRES_OK(context, context->output_list("output", &output));
OP_REQUIRES(context, input.size() == output.size(),
errors::InvalidArgument("Input and output counts must match"));
if (absl::StrContains(name(), kTpuExecuteStagingNodeName)) {
metrics::UpdateTpuVariableDistributionTime(EnvTime::NowMicros() -
context->start_time_usecs());
}
for (int i = 0; i < input.size(); ++i) {
output.set(i, input[i]);
}
}
bool IsExpensive() override { return false; }
};
}
#endif
#include "tensorflow/core/kernels/identity_n_op.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
namespace tensorflow {
REGISTER_KERNEL_BUILDER(Name("IdentityN").Device(DEVICE_DEFAULT), IdentityNOp);
REGISTER_KERNEL_BUILDER(Name("IdentityN").Device(DEVICE_TPU_SYSTEM),
IdentityNOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("IdentityN");
} |
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class IdentityNOpTest : public OpsTestBase {
protected:
Status Init(DataType input0_type, DataType input1_type) {
TF_CHECK_OK(NodeDefBuilder("op", "IdentityN")
.Input(FakeInput({input0_type, input1_type}))
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(IdentityNOpTest, Int32DoubleSuccess_6) {
TF_ASSERT_OK(Init(DT_INT32, DT_DOUBLE));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<double>(TensorShape({6}),
{7.3, 8.3, 9.3, 10.3, 11.3, 12.3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected0(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected0, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected0, *GetOutput(0));
Tensor expected1(allocator(), DT_DOUBLE, TensorShape({6}));
test::FillValues<double>(&expected1, {7.3, 8.3, 9.3, 10.3, 11.3, 12.3});
test::ExpectTensorEqual<double>(expected1, *GetOutput(1));
}
TEST_F(IdentityNOpTest, Int32Success_2_3) {
TF_ASSERT_OK(Init(DT_INT32, DT_INT32));
AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({2, 3}), {7, 8, 9, 10, 11, 12});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2, 3}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
test::FillValues<int32>(&expected, {7, 8, 9, 10, 11, 12});
test::ExpectTensorEqual<int32>(expected, *GetOutput(1));
}
TEST_F(IdentityNOpTest, StringInt32Success) {
TF_ASSERT_OK(Init(DT_STRING, DT_INT32));
AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
AddInputFromArray<int32>(TensorShape({8}), {1, 3, 5, 7, 9, 11, 13, 15});
TF_ASSERT_OK(RunOpKernel());
Tensor expected0(allocator(), DT_STRING, TensorShape({6}));
test::FillValues<tstring>(&expected0, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<tstring>(expected0, *GetOutput(0));
Tensor expected1(allocator(), DT_INT32, TensorShape({8}));
test::FillValues<int32>(&expected1, {1, 3, 5, 7, 9, 11, 13, 15});
test::ExpectTensorEqual<int32>(expected1, *GetOutput(1));
}
}
} |