ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
acb28b16-a116-4b1a-a11e-a1ee50b66645 | cpp | tensorflow/tensorflow | force_xla_constants_on_host_pass | tensorflow/compiler/jit/force_xla_constants_on_host_pass.cc | tensorflow/compiler/jit/force_xla_constants_on_host_pass_test.cc | #include "tensorflow/compiler/jit/force_xla_constants_on_host_pass.h"
#include "tensorflow/compiler/jit/compilability_check_util.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
namespace tensorflow {
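// Scans the graph for function-call nodes that XLA can compile and annotates
// each with the "_input_hostmem" attribute listing its compile-time-constant
// argument indices, so those inputs are kept in host memory.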
Status ForceXlaConstantsOnHostPass::Run(
const GraphOptimizationPassOptions& options) {
Graph* graph = options.graph->get();
OptimizerOptions opts;
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
nullptr, options.session_options->env, nullptr,
TF_GRAPH_DEF_VERSION, options.flib_def, opts);
FunctionLibraryRuntime* flr =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
for (Node* node : graph->nodes()) {
if (CanCreateXlaKernel(node->def())) {
const FunctionBody* fbody = nullptr;
std::vector<int> constant_arg_indices;
std::vector<int> resource_arg_indices;
NameAttrList function;
TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(node->def(), &function));
TF_RETURN_IF_ERROR(GetBodyAndConstantsAndResources(
flr, function, &fbody, &constant_arg_indices, &resource_arg_indices));
VLOG(3) << "Found constant arg indices: "
<< absl::StrJoin(constant_arg_indices, ", ");
node->AddAttr("_input_hostmem", constant_arg_indices);
}
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/force_xla_constants_on_host_pass.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/compilability_check_util.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
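// Builds a Graph from the scope, runs ForceXlaConstantsOnHostPass over it, and
// returns the rewritten graph.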
Status ForceXlaConstantsOnHost(const Scope& s,
FunctionLibraryDefinition* flib_def,
std::unique_ptr<Graph>* result) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphOptimizationPassOptions options;
SessionOptions session_options;
session_options.env = Env::Default();
options.graph = &graph;
options.session_options = &session_options;
options.flib_def = flib_def;
TF_RETURN_IF_ERROR(s.ToGraph(graph.get()));
ForceXlaConstantsOnHostPass rewriter;
TF_RETURN_IF_ERROR(rewriter.Run(options));
*result = std::move(graph);
return absl::OkStatus();
}
TEST(ForceXlaConstantsOnHostPassTest, Simple) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary library;
FunctionDef called_func =
FunctionDefHelper::Create("TransposeCall",
{"a:float", "b:int32"},
{"c:float"}, {},
{{{"t0"},
"Transpose",
{"a", "b"},
{
{"T", DT_FLOAT},
{"Tperm", DT_INT32},
}}},
{{"c", "t0:y:0"}});
AttrValue true_attribute;
true_attribute.set_b(true);
(*called_func.mutable_attr())[kXlaMustCompileAttr] = true_attribute;
*library.add_function() = called_func;
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(library));
FunctionLibraryDefinition flib_def(OpRegistry::Global(), library);
Output in = ops::Placeholder(root, DT_FLOAT);
Output perm = ops::Const(root, {3, 1, 2, 0});
NameAttrList b_name_attr;
b_name_attr.set_name("TransposeCall");
ops::PartitionedCall call(root.WithOpName("call"), {in, perm}, {DT_FLOAT},
b_name_attr);
call.output.front().node()->AddAttr(kXlaMustCompileAttr, true);
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(ForceXlaConstantsOnHost(root, &flib_def, &graph));
bool found = false;
for (Node* node : graph->nodes()) {
if (CanCreateXlaKernel(node->def())) {
EXPECT_FALSE(found);
found = true;
std::vector<int32> hostmem_attr;
EXPECT_TRUE(TryGetNodeAttr(node->def(), "_input_hostmem", &hostmem_attr));
EXPECT_EQ(hostmem_attr.size(), 1);
EXPECT_EQ(hostmem_attr[0], 1);
}
}
EXPECT_TRUE(found);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/force_xla_constants_on_host_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/force_xla_constants_on_host_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8b3d1a79-9e0e-4afa-b82c-826ddee12d52 | cpp | tensorflow/tensorflow | xla_launch_util | tensorflow/compiler/jit/xla_launch_util.cc | tensorflow/compiler/jit/xla_launch_util_test.cc | #include "tensorflow/compiler/jit/xla_launch_util.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/jit/pjrt_tensor_buffer.h"
#include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h"
#include "tensorflow/compiler/jit/variable_info.h"
#include "tensorflow/compiler/jit/variable_info_util.h"
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/client/local_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_stream_executor_client.h"
#include "xla/pjrt/tracked_device_buffer.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tsl/framework/device_id_utils.h"
#include "xla/tsl/framework/serving_device_selector_policies.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h"
#include "tensorflow/core/common_runtime/gpu_device_context.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include "tensorflow/core/util/stream_executor_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using xla::ScopedShapedBuffer;
using xla::ShapedBuffer;
se::Platform::Id XlaPlatformInfoFromDevice(DeviceBase* device_base) {
auto device = static_cast<Device*>(device_base);
se::Platform::Id platform_id = nullptr;
if (device->device_type() == DEVICE_CPU) {
platform_id = se::host::kHostPlatformId;
}
return platform_id;
}
absl::flat_hash_map<int, int> CreateVariableLookup(
const std::vector<VariableInfo>& variables) {
absl::flat_hash_map<int, int> variable_lookup;
for (int i = 0; i < variables.size(); i++) {
variable_lookup[variables[i].index()] = i;
}
return variable_lookup;
}
}
std::vector<const Tensor*> InputsFromContext(OpKernelContext* ctx) {
std::vector<const Tensor*> inputs;
inputs.reserve(ctx->num_inputs());
for (int input_idx = 0; input_idx < ctx->num_inputs(); input_idx++) {
inputs.push_back(&ctx->input(input_idx));
}
return inputs;
}
absl::StatusOr<std::vector<int>> GetConstantInputIndicesFromContext(
OpKernelContext* ctx) {
std::vector<int> constant_input_indices;
TF_RETURN_IF_ERROR(GetCompileTimeConstInputs(
&ctx->op_kernel(), &constant_input_indices, ctx->function_library()));
if (!absl::c_all_of(constant_input_indices, [&](int idx) {
return ctx->input_memory_type(idx) == HOST_MEMORY;
})) {
return errors::Internal("Unexpected device placement for a constant input");
}
return constant_input_indices;
}
XlaComputationLaunchContext::XlaComputationLaunchContext(
xla::LocalClient* client, se::DeviceMemoryAllocator* xla_allocator,
int device_ordinal, bool allocate_xla_tensors, bool use_multiple_streams)
: client_(client),
xla_allocator_(xla_allocator),
allocate_xla_tensors_(allocate_xla_tensors),
use_multiple_streams_(use_multiple_streams),
device_ordinal_(device_ordinal) {
if (use_multiple_streams_) {
CHECK(allocate_xla_tensors_) << "To use multiple streams correctly we must "
"be allocating XLA tensors!";
}
}
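// Fills one buffer of an xla::ExecutionInput, transferring ownership of the
// device memory to XLA when the buffer is being donated.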
static void PopulateExecutionInputBuffer(xla::ExecutionInput& execution_input,
xla::ShapeIndex index,
se::DeviceMemoryBase buffer,
bool donate_buffer, int device_ordinal,
se::DeviceMemoryAllocator* allocator) {
xla::MaybeOwningDeviceMemory* in_buffer =
execution_input.MutableBuffer(index);
if (donate_buffer) {
*in_buffer = se::OwningDeviceMemory(buffer, device_ordinal, allocator);
} else {
*in_buffer = buffer;
}
}
absl::StatusOr<std::vector<xla::ExecutionInput>>
XlaComputationLaunchContext::PopulateInputs(
OpKernelContext* ctx,
const XlaCompiler::CompilationResult* compilation_result,
const std::map<int, const Tensor*>& resource_vars,
int missing_ctx_input_prefix,
const xla::HloInputOutputAliasConfig& input_output_alias) {
std::vector<xla::ExecutionInput> arguments;
arguments.reserve(compilation_result->xla_input_shapes.size());
for (int i = 0; i < compilation_result->xla_input_shapes.size(); ++i) {
int arg_num = compilation_result->input_mapping[i];
CHECK_GE(arg_num, missing_ctx_input_prefix);
const xla::Shape& device_shape = compilation_result->xla_input_shapes[i];
const xla::Shape& host_shape =
xla::ShapeUtil::DeviceShapeToHostShape(device_shape);
auto resource_var_it = resource_vars.find(arg_num);
bool is_resource_variable = resource_var_it != resource_vars.end();
bool is_updated_resource_variable =
is_resource_variable &&
absl::c_any_of(compilation_result->resource_updates,
[&](const XlaCompiler::ResourceUpdate& update) {
return update.input_index == arg_num &&
update.modified;
});
const Tensor* t = is_resource_variable
? resource_var_it->second
: &(ctx->input(arg_num - missing_ctx_input_prefix));
CHECK(t);
bool donate_buffer =
t->RefCountIsOne() && is_updated_resource_variable &&
input_output_alias.ParameterHasAlias(i, xla::ShapeIndex{});
VLOG(3) << "Processing input: " << i
<< "; is_resource_variable=" << is_resource_variable
<< "; is_updated_resource_variable=" << is_updated_resource_variable
<< "; donate_buffer=" << donate_buffer;
if (use_multiple_streams_) {
CHECK(ctx->op_device_context() && ctx->op_device_context()->stream())
<< "Must have a stream available when using XLA tensors!";
XlaTensor* xla_tensor = XlaTensor::FromTensor(t);
CHECK(xla_tensor);
xla_tensor->WaitForDefinitionEventOnStream(
ctx->op_device_context()->stream());
}
arguments.emplace_back(device_shape, host_shape);
xla::ExecutionInput& execution_input = arguments.back();
se::DeviceMemoryBase dmem = XlaTensor::DeviceMemoryFromTensor(*t);
PopulateExecutionInputBuffer(execution_input, xla::ShapeIndex{}, dmem,
donate_buffer, device_ordinal_,
xla_allocator_);
}
return std::move(arguments);
}
static Tensor MakeTensor(DataType dtype, const TensorShape& shape,
se::DeviceMemoryBase buffer, Allocator* allocator) {
size_t expected_size = shape.num_elements() * DataTypeSize(dtype);
auto* tensor_buffer = new XlaTensorBuffer(buffer.opaque(), expected_size,
buffer.size(), allocator);
Tensor t(dtype, shape, tensor_buffer);
tensor_buffer->Unref();
return t;
}
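// Produces the Tensor for XLA output `output_num`: reuses the aliased input
// tensor when the compiled program aliases it to this output; otherwise either
// allocates an XlaTensor that takes over the output buffer or wraps the buffer
// directly in a new Tensor.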
static absl::StatusOr<Tensor> GetOrCreateTensorForOutput(
xla::ScopedShapedBuffer& output, int output_num, OpKernelContext* ctx,
int missing_ctx_input_prefix,
const xla::HloInputOutputAliasConfig& input_output_alias,
absl::Span<const int> input_mapping,
const std::map<int, const Tensor*>& resource_vars_snapshots,
DataType output_dtype, const TensorShape& output_shape,
Allocator* output_allocator, bool allocate_xla_tensors, se::Stream* stream,
bool use_multiple_streams, std::shared_ptr<se::Event> definition_event) {
xla::ShapeIndex output_index = input_output_alias.shape().IsTuple()
? xla::ShapeIndex({output_num})
: xla::ShapeIndex({});
CHECK(input_output_alias.shape().IsTuple() || output_num == 0);
if (std::optional<xla::HloInputOutputAliasConfig::Alias> alias =
input_output_alias.GetAliasedParameter(output_index)) {
VLOG(3) << "Found alias: " << alias->ToString();
int tf_param =
input_mapping[alias->parameter_number] - missing_ctx_input_prefix;
const Tensor input_tensor =
ctx->input(tf_param).dtype() != DT_RESOURCE
? ctx->input(tf_param)
: *resource_vars_snapshots.at(missing_ctx_input_prefix + tf_param);
se::DeviceMemoryBase input_buffer =
XlaTensor::DeviceMemoryFromTensor(input_tensor);
se::DeviceMemoryBase output_buffer = output.buffer({output_num});
if (input_buffer.opaque() == output_buffer.opaque()) {
output.set_buffer(se::OwningDeviceMemory(), {output_num});
return input_tensor;
}
}
if (allocate_xla_tensors) {
Tensor output_tensor;
TF_RETURN_IF_ERROR(
ctx->allocate_temp(output_dtype, output_shape, &output_tensor));
if (output_tensor.TotalBytes() > 0) {
XlaTensor* xla_tensor = XlaTensor::FromTensor(&output_tensor);
TF_RET_CHECK(xla_tensor);
xla_tensor->set_shaped_buffer(output.TakeSubTree({output_num}));
if (use_multiple_streams) {
xla_tensor->ResetDefinitionEvent(definition_event, stream);
}
}
return output_tensor;
}
se::DeviceMemoryBase output_buffer = output.buffer({output_num});
Tensor output_tensor =
MakeTensor(output_dtype, output_shape, output_buffer, output_allocator);
output.set_buffer(se::OwningDeviceMemory(), {output_num});
return output_tensor;
}
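// Sets output `output_num` to the compile-time constant computed by XLA,
// copying the value to the device first when the output must live there.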
Status SetOutputForConstant(
OpKernelContext* ctx, bool requires_copy_to_device,
const XlaCompiler::CompilationResult* compilation_result, int output_num) {
CHECK(compilation_result->outputs[output_num].is_constant);
const Tensor& const_tensor =
compilation_result->outputs[output_num].constant_value;
Tensor* output_tensor;
if (requires_copy_to_device && const_tensor.TotalBytes() > 0) {
VLOG(1) << "Constant output tensor on device";
TF_RETURN_IF_ERROR(
ctx->allocate_output(output_num, const_tensor.shape(), &output_tensor));
Device* device = dynamic_cast<Device*>(ctx->device());
if (device == nullptr) {
return errors::Internal("DeviceBase was not a Device.");
}
ctx->op_device_context()->CopyCPUTensorToDevice(
&const_tensor, device, output_tensor,
[&](Status status) { TF_CHECK_OK(status); });
if (device->device_type() == DEVICE_GPU) {
auto* gpu_device_context =
static_cast<GPUDeviceContext*>(ctx->op_device_context());
TF_RETURN_IF_ERROR(gpu_device_context->stream()->WaitFor(
gpu_device_context->host_to_device_stream()));
}
} else {
ctx->set_output(output_num, const_tensor);
output_tensor = ctx->mutable_output(output_num);
}
return absl::OkStatus();
}
static absl::StatusOr<Var*> GetOrCreateResourceVar(
OpKernelContext* ctx, const ResourceHandle& handle,
const XlaCompiler::ResourceUpdate& write) {
Var* variable = nullptr;
TF_RETURN_IF_ERROR(
LookupOrCreateResource<Var>(ctx, handle, &variable, [&write](Var** ptr) {
*ptr = new Var(write.type);
return absl::OkStatus();
}));
return variable;
}
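// Collects VariableInfo (looking up or creating the resource variables) for
// every resource update produced by the compiled computation.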
absl::StatusOr<std::vector<VariableInfo>> GatherVariableInfo(
OpKernelContext* ctx,
const XlaCompiler::CompilationResult& compilation_result,
int missing_ctx_input_prefix) {
std::vector<VariableInfo> out;
out.reserve(compilation_result.resource_updates.size());
for (int i = 0; i < compilation_result.resource_updates.size(); ++i) {
const XlaCompiler::ResourceUpdate& write =
compilation_result.resource_updates[i];
int actual_input_index = write.input_index - missing_ctx_input_prefix;
if (actual_input_index < 0 || actual_input_index >= ctx->num_inputs()) {
return errors::Internal("Invalid input index for variable write.");
}
const ResourceHandle handle = HandleFromInput(ctx, actual_input_index);
TF_ASSIGN_OR_RETURN(Var * variable,
GetOrCreateResourceVar(ctx, handle, write));
out.emplace_back(actual_input_index, handle.name(), variable,
handle.definition_stack_trace());
}
return std::move(out);
}
Status XlaComputationLaunchContext::PopulateOutputs(
OpKernelContext* ctx,
const XlaCompiler::CompilationResult* compilation_result,
ScopedShapedBuffer output, int missing_ctx_input_prefix,
absl::Span<VariableInfo> variable_infos,
const xla::HloInputOutputAliasConfig& input_output_alias,
const std::map<int, const Tensor*>& resource_vars) {
se::Stream* stream =
ctx->op_device_context() ? ctx->op_device_context()->stream() : nullptr;
Allocator* allocator = ctx->device()->GetAllocator({});
VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString();
VLOG(2) << "Result tuple shape (on device): "
<< output.on_device_shape().DebugString();
CHECK_EQ(ctx->num_outputs(), compilation_result->outputs.size());
if (!output.on_host_shape().IsTuple()) {
ShapedBuffer nontuple_buffer = output.release();
ShapedBuffer buffer(
xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_host_shape()}),
xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_device_shape()}),
output.device_ordinal());
buffer.buffers().CopySubtreeFrom(nontuple_buffer.buffers(),
{},
{0});
output = ScopedShapedBuffer(std::move(buffer), output.memory_allocator());
}
std::shared_ptr<se::Event> definition_event;
if (use_multiple_streams_ && stream) {
TF_ASSIGN_OR_RETURN(definition_event, stream->parent()->CreateEvent());
TF_RETURN_IF_ERROR(stream->RecordEvent(definition_event.get()));
}
for (const XlaOutputDescription& descr : compilation_result->outputs) {
if (descr.type == DT_VARIANT) {
return errors::Unimplemented(
"Support for TensorList crossing the XLA/TF boundary "
"is not implemented");
}
}
std::vector<TensorShape> output_tensor_shapes;
output_tensor_shapes.reserve(ctx->num_outputs());
if (output.on_host_shape().is_dynamic()) {
const se::Platform* platform = nullptr;
if (stream != nullptr) {
platform = stream->parent()->GetPlatform();
} else {
TF_ASSIGN_OR_RETURN(platform,
se::PlatformManager::PlatformWithId(
XlaPlatformInfoFromDevice(ctx->device())));
}
TF_ASSIGN_OR_RETURN(auto transfer_manager,
xla::TransferManager::GetForPlatform(platform));
xla::Shape output_device_shape = output.on_device_shape();
TF_RETURN_IF_ERROR(transfer_manager->ReadDynamicShapes(
stream, &output, &output_device_shape));
output.set_shapes(output_device_shape, output_device_shape);
for (int i = 0; i < ctx->num_outputs(); ++i) {
const xla::Shape& subshape =
xla::ShapeUtil::GetSubshape(output_device_shape, {i});
TensorShape shape;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(subshape, &shape));
output_tensor_shapes.push_back(shape);
}
} else {
for (int i = 0; i < ctx->num_outputs(); ++i) {
output_tensor_shapes.push_back(compilation_result->outputs[i].shape);
}
}
int output_num = 0;
for (int i = 0, end = ctx->num_outputs(); i < end; ++i) {
const TensorShape& shape = output_tensor_shapes[i];
const DataType& type = compilation_result->outputs[i].type;
VLOG(2) << "Populating output for retval " << i << " shape "
<< shape.DebugString() << " type " << DataTypeString(type);
if (compilation_result->outputs[i].is_constant) {
TF_RETURN_IF_ERROR(SetOutputForConstant(
ctx, stream != nullptr,
compilation_result, i));
} else if (type == DT_RESOURCE) {
int input_index =
compilation_result->outputs[i].input_index - missing_ctx_input_prefix;
TF_RET_CHECK(input_index >= 0 && input_index < ctx->num_inputs())
<< "Invalid input for outputs " << i << ": " << input_index;
ctx->set_output(i, ctx->input(input_index));
} else {
TF_ASSIGN_OR_RETURN(
Tensor output_tensor,
GetOrCreateTensorForOutput(
output, output_num, ctx, missing_ctx_input_prefix,
input_output_alias, compilation_result->input_mapping,
resource_vars, ctx->expected_output_dtype(i), shape, allocator,
allocate_xla_tensors_, stream, use_multiple_streams_,
definition_event));
ctx->set_output(i, output_tensor);
++output_num;
}
}
absl::flat_hash_map<int, int> variable_info_lookup;
for (int i = 0; i < variable_infos.size(); i++) {
variable_info_lookup.emplace(variable_infos[i].index(), i);
}
for (int i = 0, end = compilation_result->resource_updates.size(); i < end;
++i) {
const XlaCompiler::ResourceUpdate& write =
compilation_result->resource_updates[i];
int actual_input_index = write.input_index - missing_ctx_input_prefix;
CHECK_GE(actual_input_index, 0);
CHECK_LT(actual_input_index, ctx->num_inputs());
Var* var = variable_infos[variable_info_lookup[actual_input_index]].var();
CHECK(var);
VLOG(2) << "Updating variable #" << i
<< " at input index: " << actual_input_index << " with shape "
<< write.shape.DebugString() << "; variable tensor has shape: "
<< var->tensor()->shape().DebugString();
if (var->is_initialized && var->tensor()->dtype() != write.type) {
return errors::Internal("Mismatched type in variable write");
}
TF_ASSIGN_OR_RETURN(
Tensor output_tensor,
GetOrCreateTensorForOutput(output, output_num, ctx,
missing_ctx_input_prefix, input_output_alias,
compilation_result->input_mapping,
resource_vars, write.type, write.shape,
allocator, allocate_xla_tensors_, stream,
use_multiple_streams_, definition_event));
var->is_initialized |= write.modified;
*var->tensor() = output_tensor;
++output_num;
}
return absl::OkStatus();
}
absl::StatusOr<std::vector<XlaCompiler::Argument>>
XlaComputationLaunchContext::BuildXlaCompilerArguments(
absl::Span<int const> must_be_constant_idxs,
absl::Span<const Tensor* const> inputs,
absl::Span<VariableInfo const> variable_args, Device* device) {
if (!must_be_constant_idxs.empty() &&
!absl::c_is_sorted(must_be_constant_idxs)) {
return absl::InvalidArgumentError("must_be_constant_idxs is not sorted");
}
VLOG(2) << "Must be const args: {"
<< absl::StrJoin(must_be_constant_idxs, ",") << "} out of "
<< inputs.size() << " args";
std::vector<XlaCompiler::Argument> out;
out.resize(inputs.size());
DeviceContext* device_context = nullptr;
if (device != nullptr) {
TF_RETURN_IF_ERROR(device->TryGetDeviceContext(&device_context));
bool using_default_context = false;
auto cleanup = absl::MakeCleanup([&] {
if (device_context != nullptr && !using_default_context) {
device_context->Unref();
}
});
if (device_context == nullptr) {
using_default_context = true;
auto* dev_info = device->tensorflow_accelerator_device_info();
if (dev_info) device_context = dev_info->default_context;
}
}
absl::flat_hash_map<int, const VariableInfo*> variable_info_lookup;
TF_CHECK_OK(CreateVariableInfoLookup(variable_args, variable_info_lookup));
for (int64_t input_num = 0; input_num < inputs.size(); ++input_num) {
const Tensor* input = inputs[input_num];
XlaCompiler::Argument& arg = out[input_num];
if (variable_info_lookup.count(input_num) && device != nullptr) {
TF_RET_CHECK(input->dtype() == DT_RESOURCE);
const VariableInfo& variable = *variable_info_lookup[input_num];
arg.name = std::string(variable.name());
arg.kind = XlaCompiler::Argument::kResource;
arg.resource_kind = XlaResource::kVariable;
arg.definition_stack_trace = variable.definition_stack_trace();
if (variable.var() && variable.var()->is_initialized) {
const Tensor* value = variable.var()->tensor();
arg.type = value->dtype();
arg.shape = value->shape();
arg.initialized = true;
} else {
arg.initialized = false;
arg.type = DT_INVALID;
arg.shape = TensorShape();
}
if (absl::c_binary_search(must_be_constant_idxs, input_num)) {
TF_RET_CHECK(variable.var() && variable.var()->is_initialized);
const Tensor* value = variable.var()->tensor();
Tensor value_on_host(value->dtype(), value->shape());
if (!device_context) {
value_on_host = *value;
} else {
TF_RETURN_IF_ERROR(device_context->CopyDeviceTensorToCPUSync(
value, "", device, &value_on_host));
}
arg.kind = XlaCompiler::Argument::kConstantResource;
arg.constant_value = value_on_host;
}
} else if (absl::c_binary_search(must_be_constant_idxs, input_num)) {
arg.kind = XlaCompiler::Argument::kConstant;
arg.type = input->dtype();
arg.shape = input->shape();
arg.constant_value = *input;
} else {
TF_RET_CHECK(input->dtype() != DT_RESOURCE);
if (input->NumElements() > 0) {
arg.kind = XlaCompiler::Argument::kParameter;
} else {
arg.kind = XlaCompiler::Argument::kConstant;
arg.constant_value = *input;
}
arg.type = input->dtype();
arg.shape = input->shape();
}
}
return out;
}
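// Converts the kernel's input tensors (and variable snapshots) into
// xla::PjRtBuffer* arguments; when use_pjrt_tensor_buffer is set and no
// PjRtTensorBuffer is present, it creates non-owning buffers that view the
// tensors' device memory, and it records inputs that must not be donated
// because their tensors are shared.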
Status PreparePjRtExecutableArguments(
int num_missing_prefix_ctx_inputs, const std::vector<int>& input_mapping,
const std::vector<const Tensor*>& inputs,
const absl::flat_hash_map<int, const Tensor*>& variable_snapshots,
xla::PjRtClient* pjrt_client, xla::PjRtDevice* pjrt_device,
bool use_pjrt_tensor_buffer, std::vector<xla::PjRtBuffer*>* args,
std::vector<std::unique_ptr<xla::PjRtBuffer>>* owned_args,
absl::flat_hash_set<int>* non_donatable_input_indices) {
for (auto arg_num : input_mapping) {
const Tensor* tensor;
if (auto it = variable_snapshots.find(arg_num);
it != variable_snapshots.end()) {
tensor = it->second;
} else {
tensor = inputs[arg_num - num_missing_prefix_ctx_inputs];
}
AsyncValueTensor* av_tensor = AsyncValueTensor::FromTensor(tensor);
if (use_pjrt_tensor_buffer) {
if (av_tensor != nullptr) {
return absl::InvalidArgumentError(
"If use_pjrt_tensor_buffer is set, the input tensor should not "
"contain an AsyncValueTensor.");
}
const PjRtTensorBuffer* pjrt_tensor_buffer =
dynamic_cast<const PjRtTensorBuffer*>(DMAHelper::buffer(tensor));
if (pjrt_tensor_buffer != nullptr) {
args->push_back(pjrt_tensor_buffer->pjrt_buffer());
} else {
auto dmem = se::DeviceMemoryBase(
const_cast<char*>(tensor->tensor_data().data()),
tensor->tensor_data().size());
absl::Span<const std::shared_ptr<xla::BufferSequencingEvent>>
definition_events;
auto device_buffer = std::make_shared<xla::TrackedDeviceBuffer>(
nullptr, pjrt_device,
std::initializer_list<se::DeviceMemoryBase>{dmem},
definition_events, []() {});
xla::Shape device_shape;
TF_RETURN_IF_ERROR(TensorShapeToXLAShape(
tensor->dtype(), tensor->shape(), &device_shape));
std::unique_ptr<xla::PjRtBuffer> pjrt_buffer =
std::make_unique<xla::PjRtStreamExecutorBuffer>(
device_shape, std::move(device_buffer), pjrt_client,
pjrt_device,
pjrt_device->default_memory_space().value_or(nullptr));
owned_args->push_back(std::move(pjrt_buffer));
args->push_back(owned_args->back().get());
}
} else {
if (av_tensor->GetBuffer() == nullptr) {
CHECK_EQ(tensor->NumElements(), 0);
continue;
}
args->push_back(av_tensor->GetBuffer().get());
}
if (!tensor->RefCountIsOne()) {
non_donatable_input_indices->insert(args->size() - 1);
}
}
return absl::OkStatus();
}
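// Writes the PjRt execution results back into the OpKernelContext: constant
// and pass-through resource outputs are handled specially, regular outputs are
// bound to the returned PjRtBuffers, and resource updates are applied to their
// variables.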
Status PopulateCtxOutputsFromPjRtExecutableOutputs(
int num_missing_prefix_ctx_inputs, const std::vector<const Tensor*>& inputs,
const std::vector<VariableInfo>& variables,
const XlaCompiler::CompilationResult& compilation_result,
const bool use_pjrt_tensor_buffer,
std::vector<std::unique_ptr<xla::PjRtBuffer>>& executable_outputs,
OpKernelContext* ctx) {
int output_num = 0;
for (int i = 0, end = ctx->num_outputs(); i < end; ++i) {
const DataType& type = compilation_result.outputs[i].type;
VLOG(2) << "Populating output for retval " << i << " type "
<< DataTypeString(type);
if (type == DT_VARIANT) {
return absl::UnimplementedError(
"Support for TensorList crossing the XLA/TF boundary "
"is not implemented");
}
if (compilation_result.outputs[i].is_constant) {
bool requires_copy_to_device = GetDeviceType(ctx) != DEVICE_CPU;
TF_RETURN_IF_ERROR(SetOutputForConstant(ctx, requires_copy_to_device,
&compilation_result, i));
} else if (type == DT_RESOURCE) {
int input_index = compilation_result.outputs[i].input_index -
num_missing_prefix_ctx_inputs;
TF_RET_CHECK(input_index >= 0 && input_index < ctx->num_inputs())
<< "Invalid input for outputs " << i << ": " << input_index;
ctx->set_output(i, *inputs[input_index]);
} else {
xla::PjRtBuffer* output_buffer = executable_outputs[output_num].get();
if (output_buffer->IsTuple()) {
return absl::InvalidArgumentError(
"Tuple PJRT buffer output is not supported.");
}
absl::Span<const int64_t> dims;
std::optional<std::vector<int64_t>> logical_dims_storage;
if (output_buffer->has_dynamic_dimensions()) {
TF_ASSIGN_OR_RETURN(std::vector<int64_t> logical_dims,
output_buffer->logical_dimensions());
logical_dims_storage.emplace(std::move(logical_dims));
dims = *logical_dims_storage;
} else {
dims = output_buffer->dimensions();
}
TensorShape tensor_shape;
for (int i = 0; i < dims.size(); ++i) {
TF_RETURN_IF_ERROR(tensor_shape.AddDimWithStatus(dims[i]));
}
if (use_pjrt_tensor_buffer) {
TF_ASSIGN_OR_RETURN(
Tensor output_tensor,
MakeTensorFromPjRtBuffer(
type, tensor_shape, std::move(executable_outputs[output_num])));
ctx->set_output(i, output_tensor);
} else {
Tensor* output_tensor;
TF_RETURN_IF_ERROR(
ctx->allocate_output(i, tensor_shape, &output_tensor));
auto output_avt = AsyncValueTensor::FromTensor(output_tensor);
output_avt->SetBuffer(std::move(executable_outputs[output_num]));
}
++output_num;
}
}
const auto& variable_lookup = CreateVariableLookup(variables);
for (int i = 0; i < compilation_result.resource_updates.size(); ++i) {
const XlaCompiler::ResourceUpdate& write =
compilation_result.resource_updates[i];
int actual_input_index = write.input_index - num_missing_prefix_ctx_inputs;
CHECK_GE(actual_input_index, 0);
CHECK_LT(actual_input_index, ctx->num_inputs());
auto it = variable_lookup.find(actual_input_index);
if (it == variable_lookup.end()) {
continue;
}
Var* var = variables[it->second].var();
CHECK(var);
VLOG(2) << "Updating variable #" << i
<< " at input index: " << actual_input_index << " with shape "
<< write.shape.DebugString() << "; variable tensor has shape: "
<< var->tensor()->shape().DebugString();
if (var->is_initialized && var->tensor()->dtype() != write.type) {
return errors::Internal("Mismatched type in variable write");
}
if (use_pjrt_tensor_buffer) {
TF_RETURN_IF_ERROR(PjRtTensorBufferUtil::UpdateOrMakeTensorWithPjRtBuffer(
write.type, write.shape, std::move(executable_outputs[output_num]),
var->tensor()));
} else {
TF_RETURN_IF_ERROR(
ctx->allocate_temp(write.type, write.shape, var->tensor()));
AsyncValueTensor::FromTensor(var->tensor())
->SetBuffer(std::move(executable_outputs[output_num]));
}
var->is_initialized |= write.modified;
++output_num;
}
return absl::OkStatus();
}
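// Builds the xla::ExecuteOptions used for PjRt execution; shape checking is
// relaxed on GPU and the given input indices are marked as non-donatable.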
xla::ExecuteOptions GetPjRtExecuteOptions(
const DeviceType& device_type,
absl::flat_hash_set<int> non_donatable_input_indices) {
xla::ExecuteOptions options;
options.arguments_are_tupled = false;
options.untuple_result = true;
options.launch_id = 1;
if (device_type == DEVICE_GPU) {
options.strict_shape_checking = false;
}
options.use_major_to_minor_data_layout_for_callbacks = true;
options.non_donatable_input_indices = std::move(non_donatable_input_indices);
return options;
}
DeviceType GetDeviceType(OpKernelContext* ctx) {
auto* device =
tensorflow::down_cast<Device*>(ctx->device()->UnderlyingDevice());
return DeviceType(device->device_type());
}
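// Convenience overload that snapshots the variables' current tensors and then
// runs the executable, populating the kernel context's outputs.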
Status RunPjRtExecutable(
const std::vector<const Tensor*>& inputs,
const std::vector<VariableInfo>& variables,
const XlaCompiler::CompilationResult& compilation_result,
xla::PjRtClient* pjrt_client, xla::PjRtLoadedExecutable* executable,
OpKernelContext* ctx) {
absl::flat_hash_map<int, const Tensor*> variable_snapshots;
for (int i = 0; i < variables.size(); i++) {
variable_snapshots[variables[i].index()] = variables[i].var()->tensor();
}
return RunPjRtExecutable(0, inputs,
variable_snapshots, variables, compilation_result,
pjrt_client, executable, ctx);
}
Status RunPjRtExecutable(
int num_missing_prefix_ctx_inputs, const std::vector<const Tensor*>& inputs,
const absl::flat_hash_map<int, const Tensor*>& variable_snapshots,
const std::vector<VariableInfo>& updated_variables,
const XlaCompiler::CompilationResult& compilation_result,
xla::PjRtClient* pjrt_client, xla::PjRtLoadedExecutable* executable,
OpKernelContext* ctx) {
const bool use_pjrt_tensor_buffer = ctx->device()
->tensorflow_accelerator_device_info()
->use_pjrt_tensor_buffer;
const DeviceType& device_type = GetDeviceType(ctx);
const int pjrt_device_id =
tsl::GetDeviceIdFromDeviceParsedName(ctx->device()->parsed_name());
TF_ASSIGN_OR_RETURN(xla::PjRtDevice * device,
pjrt_client->LookupAddressableDevice(
xla::PjRtLocalDeviceId(pjrt_device_id)));
gpu::GpuServingDeviceSelectorResource* device_selector_resource = nullptr;
if (device_type == DEVICE_GPU) {
auto rm = ctx->resource_manager();
TF_RETURN_IF_ERROR(rm->LookupOrCreate<
gpu::GpuServingDeviceSelectorResource>(
rm->default_container(), gpu::kGpuServingDeviceSelectorResourceName,
&device_selector_resource,
[&](gpu::GpuServingDeviceSelectorResource** device_selector_resource) {
*device_selector_resource = new gpu::GpuServingDeviceSelectorResource(
pjrt_client->addressable_device_count(),
std::make_unique<tsl::RoundRobinPolicy>());
return absl::OkStatus();
}));
core::ScopedUnref device_selector_resource_ref(device_selector_resource);
TF_ASSIGN_OR_RETURN(absl::string_view fingerprint,
executable->FingerprintExecutable());
device_selector_resource->selector()->Enqueue(pjrt_device_id, fingerprint);
}
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<xla::PjRtBuffer>> execute_outputs,
RunPjRtExecutable(num_missing_prefix_ctx_inputs, inputs,
variable_snapshots, updated_variables, device_type,
use_pjrt_tensor_buffer, compilation_result, device,
pjrt_client, executable));
if (device_selector_resource != nullptr) {
device_selector_resource->selector()->Completed(pjrt_device_id,
false);
}
TF_RETURN_IF_ERROR(PopulateCtxOutputsFromPjRtExecutableOutputs(
num_missing_prefix_ctx_inputs, inputs, updated_variables,
compilation_result, use_pjrt_tensor_buffer, execute_outputs, ctx));
return absl::OkStatus();
}
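// Lowest-level overload: prepares the PjRt arguments and executes the loaded
// executable (sharded or portable), returning the raw output buffers without
// touching an OpKernelContext.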
absl::StatusOr<std::vector<std::unique_ptr<xla::PjRtBuffer>>> RunPjRtExecutable(
int num_missing_prefix_ctx_inputs, const std::vector<const Tensor*>& inputs,
const absl::flat_hash_map<int, const Tensor*>& variable_snapshots,
const std::vector<VariableInfo>& updated_variables,
const DeviceType& device_type, bool use_pjrt_tensor_buffer,
const XlaCompiler::CompilationResult& compilation_result,
xla::PjRtDevice* device, xla::PjRtClient* pjrt_client,
xla::PjRtLoadedExecutable* executable) {
std::vector<xla::PjRtBuffer*> executable_args;
executable_args.reserve(compilation_result.input_mapping.size());
std::vector<std::unique_ptr<xla::PjRtBuffer>> owned_executable_args;
absl::flat_hash_set<int> non_donatable_input_indices;
TF_RETURN_IF_ERROR(PreparePjRtExecutableArguments(
num_missing_prefix_ctx_inputs, compilation_result.input_mapping, inputs,
variable_snapshots, pjrt_client, device, use_pjrt_tensor_buffer,
&executable_args, &owned_executable_args, &non_donatable_input_indices));
std::vector<std::unique_ptr<xla::PjRtBuffer>> execute_outputs;
std::optional<xla::PjRtFuture<>> future;
if (executable->num_replicas() != 1 || executable->num_partitions() != 1) {
TF_ASSIGN_OR_RETURN(
execute_outputs,
executable->ExecuteSharded(
executable_args, device,
GetPjRtExecuteOptions(device_type,
std::move(non_donatable_input_indices)),
future));
} else {
TF_ASSIGN_OR_RETURN(
execute_outputs,
executable->ExecutePortable(
executable_args, device,
GetPjRtExecuteOptions(device_type,
std::move(non_donatable_input_indices)),
future));
}
if (!owned_executable_args.empty() && future.has_value()) {
future->OnReady([owned_executable_args =
std::move(owned_executable_args)](Status s) {});
}
return execute_outputs;
}
} | #include "tensorflow/compiler/jit/xla_launch_util.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/compiler/jit/device_compiler.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/pjrt_device_compiler_client.h"
#include "tensorflow/compiler/jit/variable_info.h"
#include "tensorflow/compiler/jit/variable_info_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/tfrt_cpu_pjrt_client.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/framework/device_id_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/tfrt/common/create_pjrt_client_util.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using PjRtDeviceCompiler =
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
using PjRtDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>;
absl::flat_hash_map<int, const Tensor*> GetVariableSnapshots(
const std::vector<VariableInfo>& variables) {
absl::flat_hash_map<int, const Tensor*> variable_snapshots;
for (int i = 0; i < variables.size(); i++) {
variable_snapshots[variables[i].index()] = variables[i].var()->tensor();
}
return variable_snapshots;
}
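// Test fixture that sets up an XLA_CPU device backed by a PjRt CPU client and
// a PjRt device compiler, plus helpers for creating host/device tensors and
// resource variables.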
class PjRtExecutionUtilTest : public OpsTestBase {
public:
PjRtExecutionUtilTest() {
auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api;
rollout_config.enabled_for_xla_launch_ = true;
rollout_config.enabled_for_compile_on_demand_ = true;
GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
auto device_type = DeviceType(DEVICE_XLA_CPU);
rollout_config.AllowForDeviceInXlaLaunch(device_type);
rollout_config.AllowForDeviceInXlaCompileOnDemand(device_type);
auto jit_device_type = DeviceType(DEVICE_CPU_XLA_JIT);
auto device =
DeviceFactory::NewDevice(device_type.type_string(), SessionOptions(),
"/job:localhost/replica:0/task:0");
device_ = device.get();
SetDevice(device_type, std::move(device));
TF_CHECK_OK(SetPjRtClientInTFGlobalResourceManager(
device_type,
xla::GetTfrtCpuClient(true, 1)
.value()));
TF_CHECK_OK(device_->TryGetDeviceContext(&device_context_));
AllocatorAttributes host_alloc_attr;
host_alloc_attr.set_on_host(true);
host_allocator_ = device_->GetAllocator(host_alloc_attr);
AllocatorAttributes device_alloc_attr;
device_alloc_attr.set_on_host(false);
device_allocator_ = device_->GetAllocator(device_alloc_attr);
auto pjrt_client_or = GetOrCreatePjRtClient(device_type_);
TF_CHECK_OK(pjrt_client_or.status());
pjrt_client_ = pjrt_client_or.value();
device_compiler_ = new PjRtDeviceCompiler(
std::make_unique<PjRtDeviceExecutablePersistor>(
PjRtDeviceExecutablePersistor::Config(), jit_device_type),
std::make_unique<PjRtDeviceCompilerClient>(pjrt_client_));
profiler_ = new DeviceCompilationProfiler();
compiler_options_.device_type = jit_device_type;
compiler_options_.client = nullptr;
compiler_options_.flib_def = flib_def_.get();
}
~PjRtExecutionUtilTest() override {
for (const auto& tensor : tensors_) {
delete tensor;
}
tensors_.clear();
device_context_->Unref();
core::ScopedUnref device_compiler_ref(device_compiler_);
core::ScopedUnref profiler_ref(profiler_);
}
template <typename T>
Tensor* CreateHostTensor(const TensorShape& shape,
const gtl::ArraySlice<T> data) {
Tensor* host_tensor =
new Tensor(host_allocator_, DataTypeToEnum<T>::v(), shape);
test::FillValues<T>(host_tensor, data);
tensors_.push_back(host_tensor);
return host_tensor;
}
template <typename T>
Tensor* CreateDeviceTensor(const TensorShape& shape,
const gtl::ArraySlice<T> data) {
Tensor* host_tensor = CreateHostTensor<T>(shape, data);
Tensor* device_tensor =
new Tensor(device_allocator_, DataTypeToEnum<T>::v(), shape);
TF_EXPECT_OK(device_context_->CopyCPUTensorToDeviceSync(
host_tensor, device_, device_tensor));
tensors_.push_back(device_tensor);
return device_tensor;
}
Tensor* GetOutput(int output_index) {
CHECK_LT(output_index, context_->num_outputs());
Tensor* device_tensor = context_->mutable_output(output_index);
managed_outputs_.resize(context_->num_outputs());
if (managed_outputs_[output_index]) {
return managed_outputs_[output_index];
}
Tensor* host_tensor = new Tensor(host_allocator_, device_tensor->dtype(),
device_tensor->shape());
TF_EXPECT_OK(device_context_->CopyDeviceTensorToCPUSync(
device_tensor, "", device_, host_tensor));
managed_outputs_[output_index] = host_tensor;
return host_tensor;
}
void CompileToExecutable(const std::vector<XlaCompiler::Argument>& args,
const XlaCompiler::CompilationResult** result,
xla::PjRtLoadedExecutable** executable,
XlaCompiler::CompileOptions compile_options = {}) {
TF_EXPECT_OK(device_compiler_->CompileSingleOpIfNeeded(
compiler_options_, args, compile_options, context_.get(), profiler_,
result, executable));
}
absl::StatusOr<std::vector<std::unique_ptr<xla::PjRtBuffer>>> RunExecutable(
const std::vector<const Tensor*>& inputs,
const std::vector<VariableInfo>& variables,
const XlaCompiler::CompilationResult* result,
xla::PjRtLoadedExecutable* executable) {
TF_ASSIGN_OR_RETURN(auto pjrt_device,
pjrt_client_->LookupAddressableDevice(
xla::PjRtLocalDeviceId(device_->parsed_name().id)));
std::vector<xla::PjRtBuffer*> executable_args;
executable_args.reserve(result->input_mapping.size());
absl::flat_hash_set<int> non_donatable_input_indices;
TF_EXPECT_OK(PreparePjRtExecutableArguments(
0, result->input_mapping, inputs,
GetVariableSnapshots(variables), nullptr,
nullptr, false,
&executable_args, {}, &non_donatable_input_indices));
xla::ExecuteOptions exe_options;
exe_options.arguments_are_tupled = false;
exe_options.untuple_result = true;
return executable->ExecutePortable(executable_args, pjrt_device,
exe_options);
}
template <typename T>
Var* CreateVariable(const string& name, const TensorShape& shape,
const gtl::ArraySlice<T> data) {
Tensor* init_var_value = CreateDeviceTensor<T>(shape, data);
Var* var = new Var(DataTypeToEnum<T>::v());
*var->tensor() = *init_var_value;
var->is_initialized = true;
return var;
}
template <typename T>
void AddVariableInput(const string& name, const TensorShape& shape,
const gtl::ArraySlice<T> data) {
Var* var = CreateVariable<T>(name, shape, data);
ResourceMgr* rm = device_->resource_manager();
TF_ASSERT_OK(rm->Create(rm->default_container(), name, var));
ResourceHandle handle;
handle.set_device(device_->name());
handle.set_container(rm->default_container());
handle.set_name(name);
TypeIndex type_index = TypeIndex::Make<Var>();
handle.set_hash_code(type_index.hash_code());
handle.set_maybe_type_name(type_index.name());
Tensor* input = new Tensor(host_allocator_, DT_RESOURCE, TensorShape({}));
input->scalar<ResourceHandle>()() = handle;
tensors_.push_back(input);
inputs_.push_back({nullptr, input});
}
protected:
DeviceContext* device_context_;
Allocator* host_allocator_;
Allocator* device_allocator_;
XlaCompiler::Options compiler_options_;
xla::PjRtClient* pjrt_client_;
PjRtDeviceCompiler* device_compiler_;
DeviceCompilationProfiler* profiler_;
};
TEST_F(PjRtExecutionUtilTest, PreparePjRtExecutableArguments) {
std::vector<const Tensor*> inputs;
inputs.push_back(CreateDeviceTensor<int32_t>(TensorShape({1, 3}), {0, 0, 0}));
inputs.push_back(CreateDeviceTensor<int32_t>(TensorShape({1, 3}), {1, 2, 3}));
inputs.push_back(CreateDeviceTensor<int32_t>(TensorShape({1, 3}), {4, 5, 6}));
int num_missing_prefix_ctx_inputs = 2;
std::vector<int> input_mapping{3, 4};
std::vector<VariableInfo> variables;
std::vector<xla::PjRtBuffer*> exec_args;
exec_args.reserve(input_mapping.size());
absl::flat_hash_set<int> non_donatable_input_indices;
TF_EXPECT_OK(PreparePjRtExecutableArguments(
num_missing_prefix_ctx_inputs, input_mapping, inputs,
GetVariableSnapshots(variables),
nullptr, nullptr,
false, &exec_args,
{}, &non_donatable_input_indices));
EXPECT_EQ(exec_args.size(), 2);
std::shared_ptr<xla::Literal> literal1 = *exec_args[0]->ToLiteralSync();
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
*literal1, xla::LiteralUtil::CreateR2<int32_t>({{1, 2, 3}})));
std::shared_ptr<xla::Literal> literal2 = *exec_args[1]->ToLiteralSync();
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
*literal2, xla::LiteralUtil::CreateR2<int32_t>({{4, 5, 6}})));
}
TEST_F(PjRtExecutionUtilTest, PreparePjRtExecutableArgumentsVariableInputs) {
std::vector<VariableInfo> variables;
Var* var1 = CreateVariable<int32>("v1", TensorShape({1, 2}), {1, 2});
variables.emplace_back(3, "v1", var1);
Var* var2 = CreateVariable<int32>("v2", TensorShape({1, 2}), {3, 4});
variables.emplace_back(4, "v2", var2);
std::vector<const Tensor*> inputs;
inputs.push_back(CreateDeviceTensor<int32_t>(TensorShape({1, 3}), {0, 0, 0}));
int num_missing_prefix_ctx_inputs = 2;
std::vector<int> input_mapping{3, 4};
std::vector<xla::PjRtBuffer*> exec_args;
exec_args.reserve(input_mapping.size());
absl::flat_hash_set<int> non_donatable_input_indices;
TF_EXPECT_OK(PreparePjRtExecutableArguments(
num_missing_prefix_ctx_inputs, input_mapping, inputs,
GetVariableSnapshots(variables),
nullptr, nullptr,
false, &exec_args,
{}, &non_donatable_input_indices));
EXPECT_EQ(exec_args.size(), 2);
std::shared_ptr<xla::Literal> literal1 = *exec_args[0]->ToLiteralSync();
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
*literal1, xla::LiteralUtil::CreateR2<int32_t>({{1, 2}})));
std::shared_ptr<xla::Literal> literal2 = *exec_args[1]->ToLiteralSync();
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
*literal2, xla::LiteralUtil::CreateR2<int32_t>({{3, 4}})));
}
TEST_F(PjRtExecutionUtilTest, PopulateCtxOutputs) {
XlaOpRegistry::RegisterCompilationKernels();
TF_EXPECT_OK(NodeDefBuilder("AddV2", "AddV2")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_INT32)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
Tensor* a = CreateDeviceTensor<int32>(TensorShape({1, 3}), {1, 2, 3});
Tensor* b = CreateDeviceTensor<int32>(TensorShape({1, 3}), {4, 5, 6});
inputs_.push_back({nullptr, a});
inputs_.push_back({nullptr, b});
CreateContext();
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({1, 3});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({1, 3});
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
CompileToExecutable(args, &result, &executable);
std::vector<const Tensor*> inputs;
inputs.push_back(a);
inputs.push_back(b);
TF_ASSERT_OK_AND_ASSIGN(auto execute_outputs,
RunExecutable(inputs, {}, result, executable));
TF_EXPECT_OK(PopulateCtxOutputsFromPjRtExecutableOutputs(
0, inputs, {}, *result,
false, execute_outputs, context_.get()));
Tensor* expected = CreateHostTensor<int32>(TensorShape({1, 3}), {5, 7, 9});
test::ExpectTensorEqual<int32>(*expected, *GetOutput(0));
}
TEST_F(PjRtExecutionUtilTest, PopulateCtxOutputsDynamicShape) {
XlaOpRegistry::RegisterCompilationKernels();
TF_EXPECT_OK(NodeDefBuilder("testWhere", "Where")
.Input(FakeInput(DT_FLOAT))
.Attr("T", DT_FLOAT)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
Tensor* a =
CreateDeviceTensor<float>(TensorShape({2, 3}), {0., 1., 1., 0., 0., 0.});
inputs_.push_back({nullptr, a});
CreateContext();
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_FLOAT;
args[0].shape = TensorShape({2, 3});
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
CompileToExecutable(args, &result, &executable);
std::vector<const Tensor*> inputs;
inputs.push_back(a);
TF_ASSERT_OK_AND_ASSIGN(auto execute_outputs,
RunExecutable(inputs, {}, result, executable));
TF_EXPECT_OK(PopulateCtxOutputsFromPjRtExecutableOutputs(
0, inputs, {}, *result,
false, execute_outputs, context_.get()));
Tensor* expected = CreateHostTensor<int64>(TensorShape({2, 2}), {0, 1, 0, 2});
test::ExpectTensorEqual<int64>(*expected, *GetOutput(0));
}
TEST_F(PjRtExecutionUtilTest, PopulateCtxOutputsVariableInputs) {
XlaOpRegistry::RegisterCompilationKernels();
TF_EXPECT_OK(NodeDefBuilder("AddV2", "AddV2")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_INT32)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddVariableInput<int32>("var1", TensorShape({1, 2}), {1, 2});
AddVariableInput<int32>("var2", TensorShape({1, 2}), {3, 4});
CreateContext();
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({1, 2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({1, 2});
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
CompileToExecutable(args, &result, &executable);
std::vector<const Tensor*> inputs = InputsFromContext(context_.get());
std::vector<int> variables_indices =
GetResourceVariableIndicesFromContext(context_.get());
std::vector<VariableInfo> variables;
variables.reserve(variables_indices.size());
TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(),
context_->device(), inputs,
variables_indices, &variables));
TF_ASSERT_OK_AND_ASSIGN(auto execute_outputs,
RunExecutable(inputs, variables, result, executable));
TF_EXPECT_OK(PopulateCtxOutputsFromPjRtExecutableOutputs(
0, inputs, variables, *result,
false, execute_outputs, context_.get()));
Tensor* expected = CreateHostTensor<int32>(TensorShape({1, 2}), {4, 6});
test::ExpectTensorEqual<int32>(*expected, *GetOutput(0));
}
TEST_F(PjRtExecutionUtilTest, PopulateCtxOutputsResourceUpdates) {
XlaOpRegistry::RegisterCompilationKernels();
TF_EXPECT_OK(NodeDefBuilder("AssignAddVariableOp", "AssignAddVariableOp")
.Input(FakeInput(DT_RESOURCE))
.Input(FakeInput(DT_INT32))
.Attr("dtype", DT_INT32)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddVariableInput<int32>("var", TensorShape({1, 3}), {1, 2, 3});
Tensor* a = CreateDeviceTensor<int32>(TensorShape({1, 3}), {2, 2, 2});
inputs_.push_back({nullptr, a});
CreateContext();
std::vector<const Tensor*> inputs = InputsFromContext(context_.get());
std::vector<int> variables_indices =
GetResourceVariableIndicesFromContext(context_.get());
std::vector<VariableInfo> variables;
variables.reserve(variables_indices.size());
TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(),
context_->device(), inputs,
variables_indices, &variables));
TF_ASSERT_OK_AND_ASSIGN(std::vector<int> constant_input_indices,
GetConstantInputIndicesFromContext(context_.get()));
TF_ASSERT_OK(LockVariables(absl::MakeSpan(variables)));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<XlaCompiler::Argument> args,
XlaComputationLaunchContext::BuildXlaCompilerArguments(
constant_input_indices, inputs, variables,
static_cast<Device*>(context_->device())));
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
CompileToExecutable(args, &result, &executable);
TF_ASSERT_OK_AND_ASSIGN(auto execute_outputs,
RunExecutable(inputs, variables, result, executable));
TF_EXPECT_OK(PopulateCtxOutputsFromPjRtExecutableOutputs(
0, inputs, variables, *result,
false, execute_outputs, context_.get()));
EXPECT_EQ(context_->num_outputs(), 0);
ResourceMgr* rm = device_->resource_manager();
Var* var = nullptr;
TF_ASSERT_OK(rm->Lookup(rm->default_container(), "var", &var));
core::ScopedUnref var_ref(var);
Tensor* device_tensor = var->tensor();
Tensor* host_tensor = new Tensor(host_allocator_, device_tensor->dtype(),
device_tensor->shape());
tensors_.push_back(host_tensor);
TF_ASSERT_OK(device_context_->CopyDeviceTensorToCPUSync(
device_tensor, "", device_, host_tensor));
Tensor* expected = CreateHostTensor<int32>(TensorShape({1, 3}), {3, 4, 5});
test::ExpectTensorEqual<int32>(*expected, *host_tensor);
}
TEST(XlaLaunchUtilTest, GetPjRtExecuteOptions) {
xla::ExecuteOptions options =
GetPjRtExecuteOptions(DeviceType(DEVICE_GPU), {});
EXPECT_FALSE(options.arguments_are_tupled);
EXPECT_TRUE(options.untuple_result);
EXPECT_FALSE(options.strict_shape_checking);
EXPECT_TRUE(options.use_major_to_minor_data_layout_for_callbacks);
}
TEST_F(PjRtExecutionUtilTest, RunPjRtExecutable) {
XlaOpRegistry::RegisterCompilationKernels();
TF_EXPECT_OK(NodeDefBuilder("AddV2", "AddV2")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_INT32)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddVariableInput<int32>("var1", TensorShape({1, 2}), {1, 2});
AddVariableInput<int32>("var2", TensorShape({1, 2}), {3, 4});
CreateContext();
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({1, 2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({1, 2});
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
CompileToExecutable(args, &result, &executable);
std::vector<const Tensor*> inputs = InputsFromContext(context_.get());
std::vector<int> variables_indices =
GetResourceVariableIndicesFromContext(context_.get());
std::vector<VariableInfo> variables;
variables.reserve(variables_indices.size());
TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(),
context_->device(), inputs,
variables_indices, &variables));
TF_ASSERT_OK(RunPjRtExecutable(inputs, variables, *result, pjrt_client_,
executable, context_.get()));
Tensor* expected = CreateHostTensor<int32>(TensorShape({1, 2}), {4, 6});
test::ExpectTensorEqual<int32>(*expected, *GetOutput(0));
}
TEST_F(PjRtExecutionUtilTest,
RunPjRtExecutableWithVariableSnapshotsAndMissingInputs) {
XlaOpRegistry::RegisterCompilationKernels();
TF_EXPECT_OK(NodeDefBuilder("Fill", "Fill")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Attr("index_type", DT_INT32)
.Attr("T", DT_INT32)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
Tensor* dims = CreateHostTensor<int32>(TensorShape({1}), {2});
Tensor* value = CreateDeviceTensor<int32>(TensorShape(), {1});
inputs_.push_back({nullptr, dims});
inputs_.push_back({nullptr, value});
CreateContext();
TF_ASSERT_OK_AND_ASSIGN(std::vector<int> constant_input_indices,
GetConstantInputIndicesFromContext(context_.get()));
EXPECT_EQ(constant_input_indices.size(), 1);
std::vector<const Tensor*> inputs = InputsFromContext(context_.get());
std::vector<int> variables_indices =
GetResourceVariableIndicesFromContext(context_.get());
absl::flat_hash_map<int, const Tensor*> variable_snapshots;
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
{
std::vector<VariableInfo> variables;
variables.reserve(variables_indices.size());
TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(),
context_->device(), inputs,
variables_indices, &variables));
TF_ASSERT_OK(LockVariables(absl::MakeSpan(variables)));
variable_snapshots = GetVariableSnapshots(variables);
TF_ASSERT_OK_AND_ASSIGN(
std::vector<XlaCompiler::Argument> args,
XlaComputationLaunchContext::BuildXlaCompilerArguments(
constant_input_indices, inputs, variables,
static_cast<Device*>(context_->device())));
CompileToExecutable(args, &result, &executable);
}
inputs = {inputs.begin() + constant_input_indices.size(), inputs.end()};
{
TF_ASSERT_OK_AND_ASSIGN(std::vector<VariableInfo> updated_variables,
GatherVariableInfo(context_.get(), *result,
constant_input_indices.size()));
TF_ASSERT_OK(LockVariables(absl::MakeSpan(updated_variables)));
TF_ASSERT_OK(RunPjRtExecutable(
constant_input_indices.size(), inputs, variable_snapshots,
updated_variables, *result, pjrt_client_, executable, context_.get()));
}
Tensor* expected = CreateHostTensor<int32>(TensorShape({2}), {1, 1});
test::ExpectTensorEqual<int32>(*expected, *GetOutput(0));
}
TEST_F(PjRtExecutionUtilTest, RunPjRtExecutableWithoutCtx) {
XlaOpRegistry::RegisterCompilationKernels();
TF_ASSERT_OK(NodeDefBuilder("AddV2", "AddV2")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_INT32)
.Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddVariableInput<int32>("var1", TensorShape({1, 2}), {1, 2});
AddVariableInput<int32>("var2", TensorShape({1, 2}), {3, 4});
CreateContext();
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({1, 2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({1, 2});
const XlaCompiler::CompilationResult* result;
xla::PjRtLoadedExecutable* executable;
CompileToExecutable(args, &result, &executable);
std::vector<const Tensor*> inputs = InputsFromContext(context_.get());
std::vector<int> variables_indices =
GetResourceVariableIndicesFromContext(context_.get());
std::vector<VariableInfo> variables;
variables.reserve(variables_indices.size());
TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(),
context_->device(), inputs,
variables_indices, &variables));
const bool use_pjrt_tensor_buffer = context_->device()
->tensorflow_accelerator_device_info()
->use_pjrt_tensor_buffer;
const DeviceType& device_type = GetDeviceType(context_.get());
const int pjrt_device_id =
tsl::GetDeviceIdFromDeviceParsedName(context_->device()->parsed_name());
TF_ASSERT_OK_AND_ASSIGN(xla::PjRtDevice * pjrt_device,
pjrt_client_->LookupAddressableDevice(
xla::PjRtLocalDeviceId(pjrt_device_id)));
absl::flat_hash_map<int, const Tensor*> variable_snapshots;
for (int i = 0; i < variables.size(); i++) {
variable_snapshots[variables[i].index()] = variables[i].var()->tensor();
}
TF_ASSERT_OK_AND_ASSIGN(
std::vector<std::unique_ptr<xla::PjRtBuffer>> execute_outputs,
RunPjRtExecutable(0, inputs,
variable_snapshots, variables, device_type,
use_pjrt_tensor_buffer, *result, pjrt_device,
pjrt_client_, executable));
for (const auto& output : execute_outputs) {
TF_ASSERT_OK(output->GetReadyFuture().Await());
}
ASSERT_EQ(execute_outputs.size(), 1);
std::shared_ptr<xla::Literal> literal = *execute_outputs[0]->ToLiteralSync();
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
*literal, xla::LiteralUtil::CreateR2<int32_t>({{4, 6}})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_launch_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_launch_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1049daff-5780-4d51-8e50-1ccb85757684 | cpp | tensorflow/tensorflow | increase_dynamism_for_auto_jit_pass | tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.cc | tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass_test.cc | #include "tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.h"
#include <iterator>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
template <class T>
using StatusOrOptional = StatusOr<std::optional<T>>;
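// Returns the value held by `n` if it is a Const node, or nullopt otherwise.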
StatusOrOptional<Tensor> TryToGetTensorFromConstOp(Node* n) {
if (n->type_string() != "Const") {
return {std::nullopt};
}
const TensorProto* proto = nullptr;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "value", &proto));
Tensor tensor(proto->dtype());
TF_RET_CHECK(tensor.FromProto(*proto));
return {tensor};
}
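// The operands of a Slice node, plus the statically known value of its
// `size` operand.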
struct SliceInputs {
Output slice_op;
Output input;
Output begin;
Output size;
std::vector<int64_t> size_as_vector;
};
std::vector<int64_t> IntTensorAsVector(const Tensor& t) {
DCHECK(t.dtype() == DT_INT32 || t.dtype() == DT_INT64);
std::vector<int64_t> result;
result.reserve(t.NumElements());
for (int i = 0; i < t.NumElements(); i++) {
int64_t element = t.dtype() == DT_INT32
? static_cast<int64_t>(t.flat<int32>()(i))
: t.flat<int64_t>()(i);
result.push_back(element);
}
return result;
}
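// Returns the operands of `slice` if its `size` operand is a constant
// vector, or nullopt otherwise.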
StatusOrOptional<SliceInputs> GetSliceInputs(Node* slice) {
const int kSliceInputIndex = 0;
const int kSliceBeginIndex = 1;
const int kSliceSizeIndex = 2;
const Edge* slice_input_edge;
TF_RETURN_IF_ERROR(slice->input_edge(kSliceInputIndex, &slice_input_edge));
const Edge* slice_size_edge;
TF_RETURN_IF_ERROR(slice->input_edge(kSliceSizeIndex, &slice_size_edge));
const Edge* slice_begin_edge;
TF_RETURN_IF_ERROR(slice->input_edge(kSliceBeginIndex, &slice_begin_edge));
SliceInputs slice_inputs;
slice_inputs.input =
Output(slice_input_edge->src(), slice_input_edge->src_output());
slice_inputs.begin =
Output(slice_begin_edge->src(), slice_begin_edge->src_output());
slice_inputs.size =
Output(slice_size_edge->src(), slice_size_edge->src_output());
TF_ASSIGN_OR_RETURN(std::optional<Tensor> tf_slice_size,
TryToGetTensorFromConstOp(slice_inputs.size.node()));
if (!tf_slice_size.has_value()) {
return {std::nullopt};
}
if (tf_slice_size->dims() != 1) {
return {std::nullopt};
}
slice_inputs.size_as_vector = IntTensorAsVector(*tf_slice_size);
return {slice_inputs};
}
Output MakeInt64(const Scope& host_scope, absl::string_view name,
const Output& x) {
return x.type() == DT_INT64
? x
: ops::Cast(host_scope.WithOpName(name, "_s64"), x, DT_INT64);
}
SliceInputs MakeSliceIndexAndSizeInt64(const Scope& host_scope,
const SliceInputs& slice_inputs) {
SliceInputs result;
result.input = slice_inputs.input;
result.begin = MakeInt64(host_scope, "begin", slice_inputs.begin);
result.size = MakeInt64(host_scope, "size", slice_inputs.size);
result.size_as_vector = slice_inputs.size_as_vector;
return result;
}
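// Caches 1-D host constants so that each distinct value is materialized only
// once, with the given control dependencies attached to every new constant.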
class ConstantCache {
public:
explicit ConstantCache(const Scope& s,
const std::vector<const Edge*>& control_deps)
: scope_(s), control_deps_(control_deps) {}
Output Get1DHostConstant(int64_t constant) {
auto it = cache_.find(constant);
if (it == cache_.end()) {
Output new_const =
ops::Const(scope_.WithOpName("const_", constant), {constant});
it = cache_.insert({constant, new_const}).first;
for (const Edge* e : control_deps_) {
scope_.graph()->AddControlEdge(e->src(), new_const.node());
}
}
return it->second;
}
private:
Scope scope_;
  std::unordered_map<int64_t, Output> cache_;
std::vector<const Edge*> control_deps_;
};
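// Computes the slice size to use for the rewritten Slice: dimensions with a
// static size of -1 are replaced by `input_shape[i] - begin[i]`, computed on
// the host.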
Status ComputeSliceSize(const Scope& host_scope,
const SliceInputs& slice_inputs,
std::vector<const Edge*> control_deps, Output* size) {
if (absl::c_all_of(slice_inputs.size_as_vector,
[](int64_t i) { return i >= 0; })) {
*size = slice_inputs.size;
return absl::OkStatus();
}
Output input_shape =
ops::Shape(host_scope.WithOpName("input_shape"), slice_inputs.input,
ops::Shape::OutType(DT_INT64));
ConstantCache constant_pool(host_scope, control_deps);
std::vector<Output> slice_size;
for (int i = 0, end = slice_inputs.size_as_vector.size(); i < end; i++) {
if (slice_inputs.size_as_vector[i] >= 0) {
slice_size.push_back(
constant_pool.Get1DHostConstant(slice_inputs.size_as_vector[i]));
continue;
}
DCHECK_EQ(slice_inputs.size_as_vector[i], -1);
Output begin_i = ops::Slice(
host_scope.WithOpName("begin_", i), slice_inputs.begin,
constant_pool.Get1DHostConstant(i), constant_pool.Get1DHostConstant(1));
Output input_shape_i = ops::Slice(
host_scope.WithOpName("input_shape_", i), input_shape,
constant_pool.Get1DHostConstant(i), constant_pool.Get1DHostConstant(1));
slice_size.push_back(ops::Sub(host_scope.WithOpName("slice_size_", i),
input_shape_i, begin_i));
DCHECK_EQ(slice_size.back().type(), DT_INT64);
}
if (slice_size.size() == 1) {
*size = slice_size[0];
} else {
auto concat_axis = ops::Const(host_scope.WithOpName("concat_axis"), 0);
for (const Edge* e : control_deps) {
host_scope.graph()->AddControlEdge(e->src(), concat_axis.node());
}
*size = ops::Concat(host_scope.WithOpName("slice_size"), slice_size,
concat_axis);
}
return absl::OkStatus();
}
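// Builds a replacement Slice on the same device whose `size` operand is
// computed on the host and marked as a compile-time constant input for XLA.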
Status ConvertTensorFlowSliceToStaticShapedSlice(
Graph* g, Node* slice, const SliceInputs& slice_inputs,
absl::string_view cluster_name, Node** result) {
string host_name;
TF_RETURN_IF_ERROR(DeviceNameUtils::DeviceNameToCpuDeviceName(
slice->assigned_device_name(), &host_name));
Status status;
Scope main_scope =
NewInternalScope(g, &status, nullptr)
.WithXlaCluster(string(cluster_name))
.NewSubScope(absl::StrCat(slice->name(), "/static_shaped_slice"));
Scope host_scope = main_scope.WithAssignedDevice(host_name);
SliceInputs slice_inputs_int64 =
MakeSliceIndexAndSizeInt64(host_scope, slice_inputs);
Node* old_size;
std::vector<const Edge*> old_size_ctrl_deps;
TF_RETURN_IF_ERROR(slice->input_node(2, &old_size));
absl::c_copy_if(old_size->in_edges(), std::back_inserter(old_size_ctrl_deps),
[](const Edge* e) { return e->IsControlEdge(); });
Output slice_size;
TF_RETURN_IF_ERROR(ComputeSliceSize(host_scope, slice_inputs_int64,
old_size_ctrl_deps, &slice_size));
*result =
ops::Slice(main_scope.WithAssignedDevice(slice->assigned_device_name())
.WithOpName("static_shaped_slice"),
slice_inputs_int64.input, slice_inputs_int64.begin, slice_size)
.node();
TF_RETURN_IF_ERROR(main_scope.status());
std::vector<string> compile_time_const_inputs;
compile_time_const_inputs.push_back("size");
(*result)->AddAttr(kXlaCompileTimeConstantInputsAttr,
compile_time_const_inputs);
return status;
}
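// Reroutes all outgoing data and control edges from `slice` to
// `static_shaped_slice`, copies the incoming control edges, and removes
// `slice` from the graph.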
void ReplaceTensorFlowSliceWithStaticShapedSlice(Graph* g, Node* slice,
Node* static_shaped_slice) {
std::vector<const Edge*> slice_out_edges;
absl::c_copy(slice->out_edges(), std::back_inserter(slice_out_edges));
for (const Edge* e : slice_out_edges) {
DCHECK(e->src_output() == 0 || e->src_output() == Graph::kControlSlot);
int src_output = e->src_output();
int dst_input = e->dst_input();
Node* dst = e->dst();
g->RemoveEdge(e);
g->AddEdge(static_shaped_slice, src_output, dst, dst_input);
}
for (const Edge* e : slice->in_edges()) {
if (e->IsControlEdge()) {
g->AddControlEdge(e->src(), static_shaped_slice);
}
}
g->RemoveNode(slice);
}
Status RewriteSlice(Graph* g, Node* slice, const SliceInputs& slice_inputs,
absl::string_view cluster_name) {
VLOG(3) << "Rewriting slice " << slice->name()
<< " to a \"static shaped\" Slice";
Node* static_shaped_slice;
TF_RETURN_IF_ERROR(ConvertTensorFlowSliceToStaticShapedSlice(
g, slice, slice_inputs, cluster_name, &static_shaped_slice));
ReplaceTensorFlowSliceWithStaticShapedSlice(g, slice, static_shaped_slice);
return absl::OkStatus();
}
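// A Slice is rewritten only if it is in an XLA cluster, its `size` operand is
// a constant vector with every entry >= -1, and its `begin` operand is not a
// constant.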
absl::StatusOr<bool> ShouldRewriteSlice(Node* n) {
if (n->type_string() != "Slice") {
return false;
}
if (!GetXlaClusterForNode(*n).has_value()) {
return false;
}
TF_ASSIGN_OR_RETURN(std::optional<SliceInputs> slice_inputs,
GetSliceInputs(n));
if (!slice_inputs.has_value()) {
return false;
}
  bool slice_size_is_ok =
      absl::c_all_of(slice_inputs->size_as_vector,
                     [](int64_t size_i) { return size_i >= -1; });
  if (!slice_size_is_ok) {
    return false;
  }
return !slice_inputs->begin.node()->IsConstant();
}
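// Rewrites every eligible Slice node in `g` and sets *changed accordingly.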
Status FindAndRewriteSlices(Graph* g, bool* changed) {
std::vector<Node*> slices_to_rewrite;
for (Node* n : g->nodes()) {
TF_ASSIGN_OR_RETURN(bool is_rewritable, ShouldRewriteSlice(n));
if (is_rewritable) {
slices_to_rewrite.push_back(n);
}
}
for (Node* n : slices_to_rewrite) {
TF_ASSIGN_OR_RETURN(std::optional<SliceInputs> slice_inputs,
GetSliceInputs(n));
TF_RET_CHECK(slice_inputs.has_value());
TF_RETURN_IF_ERROR(
RewriteSlice(g, n, *slice_inputs, *GetXlaClusterForNode(*n)));
}
if (!slices_to_rewrite.empty()) {
FixupSourceAndSinkEdges(g);
}
*changed = !slices_to_rewrite.empty();
return absl::OkStatus();
}
}
Status IncreaseDynamismForAutoJitPass::Run(
const GraphOptimizationPassOptions& options) {
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
if (flags->tf_xla_clustering_debug) {
DumpGraphToFile("before_increase_dynamism_for_auto_jit_pass",
**options.graph, options.flib_def);
}
bool changed;
TF_RETURN_IF_ERROR(FindAndRewriteSlices(options.graph->get(), &changed));
if (changed && flags->tf_xla_clustering_debug) {
DumpGraphToFile("increase_dynamism_for_auto_jit_pass", **options.graph,
options.flib_def);
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.h"
#include <gmock/gmock.h>
#include "absl/status/status.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/compiler/jit/node_matchers.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
using ::testing::_;
using testing::matchers::AssignedDevice;
using testing::matchers::Attr;
using testing::matchers::Const;
using testing::matchers::CtrlDeps;
using testing::matchers::Inputs;
using testing::matchers::Name;
using testing::matchers::NodeWith;
using testing::matchers::Op;
using testing::matchers::Out;
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& device_attributes)
: Device(nullptr, device_attributes) {}
Status Sync() override { return errors::Unimplemented("FakeDevice::Sync()"); }
Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; }
static std::unique_ptr<Device> Make(const string& name, const string& type) {
DeviceAttributes device_attributes;
device_attributes.set_name(name);
device_attributes.set_device_type(DeviceType(type).type());
return std::make_unique<FakeDevice>(device_attributes);
}
};
const char* kHostName = "/job:worker/replica:0/task:0/device:CPU:0";
const char* kDeviceName = "/job:worker/replica:0/task:0/device:GPU:0";
Status IncreaseDynamismForAutoJit(const Scope& s,
std::unique_ptr<Graph>* result) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(FakeDevice::Make(kDeviceName, DEVICE_GPU));
devices.push_back(FakeDevice::Make(kHostName, DEVICE_CPU));
std::unique_ptr<DeviceSet> device_set(new DeviceSet());
for (auto& device : devices) {
device_set->AddDevice(device.get());
}
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_global_jit_level(OptimizerOptions::ON_2);
GraphOptimizationPassOptions options;
options.graph = &graph;
options.device_set = device_set.get();
options.session_options = &session_options;
std::unordered_map<string, string> assigned_device_names;
for (Node* n : s.graph()->nodes()) {
assigned_device_names[n->name()] = n->assigned_device_name();
}
TF_RETURN_IF_ERROR(s.ToGraph(graph.get()));
for (Node* n : graph->nodes()) {
n->set_assigned_device_name(assigned_device_names[n->name()]);
}
IncreaseDynamismForAutoJitPass rewriter;
TF_RETURN_IF_ERROR(rewriter.Run(options));
*result = std::move(graph);
return absl::OkStatus();
}
TEST(SliceToDynamicSliceRewriteTest, Basic) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size = ops::Const(root.WithOpName("size"), {-1, 500});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
const int64_t zero_64 = 0;
const int32_t zero_32 = 0;
const int64_t one_64 = 1;
auto m_input = Out(NodeWith(Op("Placeholder"), Name("input")));
auto m_begin_s64 = Out(NodeWith(
Op("Cast"), Inputs(Out(NodeWith(Op("Placeholder"), Name("begin"))))));
auto m_input_shape = Out(NodeWith(Op("Shape"), Inputs(m_input)));
auto m_slice_size_0 = Out(NodeWith(
Op("Sub"), AssignedDevice(kHostName),
Inputs(
Out(NodeWith(Op("Slice"), AssignedDevice(kHostName),
Inputs(m_input_shape, Const(zero_64), Const(one_64)))),
Out(NodeWith(Op("Slice"), AssignedDevice(kHostName),
Inputs(m_begin_s64, Const(zero_64), Const(one_64)))))));
auto m_dynamic_slice_size =
Out(NodeWith(Op("ConcatV2"), AssignedDevice(kHostName),
Inputs(m_slice_size_0, Const(static_cast<int64_t>(500)),
Const(zero_32))));
std::vector<string> compile_time_constant_inputs;
compile_time_constant_inputs.push_back("size");
auto m_dynamic_slice = NodeWith(
Op("Slice"), AssignedDevice(kDeviceName),
Attr(kXlaCompileTimeConstantInputsAttr, compile_time_constant_inputs),
Inputs(m_input, m_begin_s64, m_dynamic_slice_size));
Node* static_shaped_slice = testing::FindNodeByName(
result.get(), "slice/static_shaped_slice/static_shaped_slice");
ASSERT_NE(static_shaped_slice, nullptr);
EXPECT_THAT(static_shaped_slice, m_dynamic_slice);
}
TEST(SliceToDynamicSliceRewriteTest, SliceFromVector) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size = ops::Const(root.WithOpName("size"), {-1});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* static_shaped_slice = testing::FindNodeByName(
result.get(), "slice/static_shaped_slice/static_shaped_slice");
EXPECT_NE(static_shaped_slice, nullptr);
EXPECT_THAT(result->nodes(), Not(Contains(NodeWith(Op("ConcatV2")))));
}
TEST(SliceToDynamicSliceRewriteTest, ControlDependencePreserved) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size = ops::Const(root.WithOpName("size"), {-1, 500});
Output control_pred = ops::Placeholder(root.WithOpName("control"), DT_BOOL);
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
root.graph()->AddControlEdge(control_pred.node(), slice.node());
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* static_shaped_slice = testing::FindNodeByName(
result.get(), "slice/static_shaped_slice/static_shaped_slice");
ASSERT_NE(static_shaped_slice, nullptr);
EXPECT_THAT(static_shaped_slice,
NodeWith(Op("Slice"),
CtrlDeps(NodeWith(Op("Placeholder"), Name("control")))));
}
int64_t ToInt64(int v) { return static_cast<int64_t>(v); }
TEST(SliceToDynamicSliceRewriteTest, Int64Indices) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT64);
Output size =
ops::Const(root.WithOpName("size"), {ToInt64(-1), ToInt64(500)});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
EXPECT_THAT(result->nodes(), Not(Contains(NodeWith(Op("Cast")))));
}
TEST(SliceToDynamicSliceRewriteTest, DontRewriteInvalidSlice) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size_placeholder =
ops::Placeholder(root.WithOpName("size_placeholder"), DT_INT32);
Output slice =
ops::Slice(root.WithOpName("slice"), input, begin, size_placeholder);
Output size = ops::Const(root.WithOpName("size"), {-8, 500});
  TF_ASSERT_OK(root.graph()->UpdateEdge(size.node(), 0, slice.node(), 2));
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
EXPECT_THAT(result->nodes(),
Not(Contains(NodeWith(Op("Slice"),
Attr(kXlaCompileTimeConstantInputsAttr)))));
}
TEST(SliceToDynamicSliceRewriteTest, DontRewriteUnclusteredSlice) {
Scope root =
Scope::NewRootScope().ExitOnError().WithAssignedDevice(kDeviceName);
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size = ops::Const(root.WithOpName("size"), {-1, 500});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
EXPECT_THAT(result->nodes(),
Not(Contains(NodeWith(Op("Slice"),
Attr(kXlaCompileTimeConstantInputsAttr)))));
}
TEST(SliceToDynamicSliceRewriteTest, DontRewriteSliceWithNonConstSize) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT64);
Output size = ops::Placeholder(root.WithOpName("size"), DT_INT64);
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
EXPECT_THAT(result->nodes(),
Not(Contains(NodeWith(Op("Slice"),
Attr(kXlaCompileTimeConstantInputsAttr)))));
}
TEST(SliceToDynamicSliceRewriteTest, ScalarSlice) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT64);
Output size = ops::Const<int64_t>(root.WithOpName("size"), {});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* static_shaped_slice = testing::FindNodeByName(
result.get(), "slice/static_shaped_slice/static_shaped_slice");
ASSERT_NE(static_shaped_slice, nullptr);
EXPECT_THAT(static_shaped_slice,
NodeWith(Op("Slice"), Attr(kXlaCompileTimeConstantInputsAttr),
Inputs(_, _, Out(NodeWith(Name(size.node()->name()))))));
}
TEST(SliceToDynamicSliceRewriteTest, IndicesNotVector) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
auto ToInt64 = [](int v) { return static_cast<int64_t>(v); };
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT64);
Output size_placeholder = ops::Placeholder(root.WithOpName("size"), DT_INT64);
Output slice =
ops::Slice(root.WithOpName("slice"), input, begin, size_placeholder);
Output size =
ops::Const(root.WithOpName("size"), {{ToInt64(-1)}, {ToInt64(500)}});
TF_ASSERT_OK(root.graph()->UpdateEdge(size.node(), 0, slice.node(), 2));
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
EXPECT_THAT(result->nodes(),
Not(Contains(NodeWith(Op("Slice"),
Attr(kXlaCompileTimeConstantInputsAttr)))));
}
TEST(SliceToDynamicSliceRewriteTest, SliceWithSliceInput) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size_a = ops::Const(root.WithOpName("size_a"), {-1, 500});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size_a);
Output size_b = ops::Const(root.WithOpName("size_a"), {-1, 200});
Output slice_with_slice_input = ops::Slice(
root.WithOpName("slice_with_slice_input"), slice, begin, size_b);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* static_shaped_slice = testing::FindNodeByName(
result.get(),
"slice_with_slice_input/static_shaped_slice/static_shaped_slice");
ASSERT_NE(static_shaped_slice, nullptr);
EXPECT_EQ(static_shaped_slice->output_type(0), DT_FLOAT)
<< "Expected DT_FLOAT, was "
<< DataType_Name(static_shaped_slice->output_type(0));
EXPECT_THAT(
static_shaped_slice,
NodeWith(
Op("Slice"),
Inputs(Out(NodeWith(
Op("Slice"),
Name("slice/static_shaped_slice/static_shaped_slice"))),
_, _)));
}
TEST(SliceToDynamicSliceRewriteTest, SliceWithSliceBegin) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input_float =
ops::Placeholder(root.WithOpName("input_float"), DT_FLOAT);
Output input_i64 = ops::Placeholder(root.WithOpName("input_i64"), DT_INT64);
Output begin_begin =
ops::Placeholder(root.WithOpName("begin_begin"), DT_INT32);
Output begin_size = ops::Const(root.WithOpName("begin_size"), {-1});
Output begin =
ops::Slice(root.WithOpName("begin"), input_i64, begin_begin, begin_size);
Output size =
ops::Const(root.WithOpName("size"), {ToInt64(-1), ToInt64(200)});
Output slice_with_slice_begin = ops::Slice(
root.WithOpName("slice_with_slice_begin"), input_float, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* static_shaped_slice = testing::FindNodeByName(
result.get(),
"slice_with_slice_begin/static_shaped_slice/static_shaped_slice");
ASSERT_NE(static_shaped_slice, nullptr);
EXPECT_EQ(static_shaped_slice->output_type(0), DT_FLOAT)
<< "Expected DT_FLOAT, was "
<< DataType_Name(static_shaped_slice->output_type(0));
EXPECT_THAT(
static_shaped_slice,
NodeWith(
Op("Slice"),
Inputs(_,
Out(NodeWith(
Op("Slice"),
Name("begin/static_shaped_slice/static_shaped_slice"))),
_)));
}
TEST(SliceToDynamicSliceRewriteTest, WithControlDepsToConstant) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size = ops::Const(root.WithOpName("size"), {-1});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
Output dependency = ops::Placeholder(root.WithOpName("dependency"), DT_BOOL);
root.graph()->AddControlEdge(dependency.node(), size.node());
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* const_0 = testing::FindNodeByName(result.get(),
"slice/static_shaped_slice/const_0");
EXPECT_NE(const_0, nullptr);
EXPECT_THAT(const_0,
NodeWith(Op("Const"), CtrlDeps(NodeWith(Op("Placeholder"),
Name("dependency")))));
}
TEST(SliceToDynamicSliceRewriteTest, DontRewriteSliceWithConstBegin) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Const(root.WithOpName("begin"), {10, 10});
Output size = ops::Const(root.WithOpName("size"), {-1, 500});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* slice_node = testing::FindNodeByName(result.get(), "slice");
EXPECT_THAT(slice_node,
NodeWith(Op("Slice"), Inputs(Out(NodeWith(Op("Placeholder"))),
Out(NodeWith(Op("Const"))),
Out(NodeWith(Op("Const"))))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
03c6a1b3-c3f5-412b-b106-c1bd4b1e3f06 | cpp | tensorflow/tensorflow | xla_kernel_creator | tensorflow/compiler/jit/xla_kernel_creator.cc | tensorflow/compiler/jit/xla_kernel_creator_test.cc | #include "tensorflow/compiler/jit/xla_kernel_creator.h"
#include <memory>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/jit/compilability_check_util.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/kernels/xla_ops.h"
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/compiler/tf2xla/mlir_bridge_pass.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/node_properties.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
bool XlaKernelCreator::CanCreateKernel(
const FunctionLibraryRuntime& flr,
const std::shared_ptr<const NodeProperties>& props) const {
return CanCreateXlaKernel(props->node_def) &&
!XlaOpRegistry::IsCompilationDevice(flr.device()->device_type());
}
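// Builds an XlaLocalLaunchBase kernel for the function call described by
// `node_def`, after computing its constant and resource argument indices and
// the corresponding host/device memory types.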
static Status CreateXlaKernel(FunctionLibraryRuntime* flr,
const NodeDef& node_def,
std::unique_ptr<OpKernel>* kernel) {
if (!CanCreateXlaKernel(node_def)) {
return errors::Internal("Invalid node: ", node_def.ShortDebugString());
}
VLOG(3) << "Attempting to create XlaLaunchOp for " << node_def.DebugString();
XlaOpRegistry::RegisterCompilationKernels();
NameAttrList function;
TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(node_def, &function));
const FunctionBody* fbody = nullptr;
std::vector<int> constant_arg_indices;
std::vector<int> resource_arg_indices;
TF_RETURN_IF_ERROR(GetBodyAndConstantsAndResources(
flr, function, &fbody, &constant_arg_indices, &resource_arg_indices));
MemoryTypeVector input_memory_types =
GetInputMemoryTypes(fbody, constant_arg_indices, resource_arg_indices);
MemoryTypeVector output_memory_types = GetOutputMemoryTypes(fbody);
Device* dev = flr->device();
Status s;
auto props = std::make_shared<NodeProperties>(
&fbody->record->fdef().signature(), node_def, fbody->arg_types,
fbody->ret_types);
OpKernelConstruction construction(DeviceType(dev->device_type()), dev,
dev->GetAllocator(AllocatorAttributes()),
flr, dev->resource_manager(), props,
input_memory_types, output_memory_types,
flr->graph_def_version(), &s);
*kernel = std::make_unique<XlaLocalLaunchBase>(
&construction, constant_arg_indices, resource_arg_indices, function,
false);
return s;
}
Status XlaKernelCreator::CreateKernel(
FunctionLibraryRuntime* flr,
const std::shared_ptr<const NodeProperties>& props,
std::unique_ptr<OpKernel>* kernel) const {
return CreateXlaKernel(flr, props->node_def, kernel);
}
bool RegisterLaunchOpCreator() {
XlaKernelCreator* xla_kernel_creator = new XlaKernelCreator();
RegisterDefaultCustomKernelCreator(xla_kernel_creator);
return true;
}
static bool register_me = RegisterLaunchOpCreator();
} | #include "tensorflow/compiler/jit/xla_kernel_creator.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
std::shared_ptr<NodeProperties> ToNodeProperties(const string& text) {
NodeDef node_def;
DataTypeVector dummy;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &node_def));
return std::make_shared<NodeProperties>(nullptr, std::move(node_def), dummy,
dummy);
}
FunctionDef XTimesY() {
return FunctionDefHelper::Define(
"XTimesY",
{"x: float", "y: resource"},
{"z: float"},
{},
{
{{"y0"}, "ReadVariableOp", {"y"}, {{"dtype", DT_FLOAT}}},
{{"z"}, "Mul", {"x", "y0"}, {{"T", DT_FLOAT}}},
});
}
class XlaKernelCreatorTest : public ::testing::Test {
protected:
void Init(const std::vector<FunctionDef>& flib) {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", 1});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(
options, "/job:localhost/replica:0/task:0", &devices));
FunctionDefLibrary proto;
for (const auto& fdef : flib) {
*(proto.add_function()) = fdef;
}
lib_def_ = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), proto);
OptimizerOptions opts;
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, lib_def_.get(), opts,
nullptr, nullptr);
flr_ = pflr_->GetFLR("/job:localhost/replica:0/task:0/cpu:0");
}
FunctionLibraryRuntime* flr_;
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<FunctionLibraryDefinition> lib_def_;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
std::unique_ptr<OpKernel> kernel_;
};
AttrValue BoolAttr(bool b) {
AttrValue v;
v.set_b(b);
return v;
}
TEST_F(XlaKernelCreatorTest, OneFloatOneResourceArgument) {
FunctionDef fdef = XTimesY();
(*fdef.mutable_attr())["_XlaMustCompile"] = BoolAttr(true);
Init({fdef});
XlaKernelCreator xla_kernel_creator;
auto callsite =
ToNodeProperties(R"pb(
name: 'XTimesY' op: 'XTimesY' input: 'a' input: 'b'
)pb");
(*(callsite->node_def.mutable_attr()))["_XlaMustCompile"] = BoolAttr(true);
Status status = xla_kernel_creator.CreateKernel(flr_, callsite, &kernel_);
ASSERT_TRUE(status.ok()) << status.ToString();
EXPECT_EQ("XTimesY", kernel_->name());
EXPECT_EQ("XTimesY", kernel_->type_string());
EXPECT_EQ(2, kernel_->num_inputs());
EXPECT_EQ(DT_FLOAT, kernel_->input_type(0));
EXPECT_EQ(DT_RESOURCE, kernel_->input_type(1));
EXPECT_EQ(DEVICE_MEMORY, kernel_->input_memory_types()[0]);
EXPECT_EQ(HOST_MEMORY, kernel_->input_memory_types()[1]);
EXPECT_EQ(1, kernel_->num_outputs());
EXPECT_EQ(DT_FLOAT, kernel_->output_type(0));
EXPECT_EQ(DEVICE_MEMORY, kernel_->output_memory_types()[0]);
}
TEST_F(XlaKernelCreatorTest, FailsIfXlaCompileAttrNotSet) {
FunctionDef fdef = XTimesY();
Init({fdef});
XlaKernelCreator xla_kernel_creator;
Status status =
xla_kernel_creator.CreateKernel(flr_, ToNodeProperties(R"proto(
name: 'XTimesY'
op: 'XTimesY'
input: 'a'
input: 'b'
)proto"),
&kernel_);
EXPECT_TRUE(absl::IsInternal(status)) << status;
}
TEST_F(XlaKernelCreatorTest, FailsIfXlaCompileAttrIsSetToFalse) {
FunctionDef fdef = XTimesY();
(*fdef.mutable_attr())["_XlaMustCompile"] = BoolAttr(false);
Init({fdef});
XlaKernelCreator xla_kernel_creator;
Status status =
xla_kernel_creator.CreateKernel(flr_, ToNodeProperties(R"proto(
name: 'XTimesY'
op: 'XTimesY'
input: 'a'
input: 'b'
)proto"),
&kernel_);
EXPECT_TRUE(absl::IsInternal(status)) << status;
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_kernel_creator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_kernel_creator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
779e5c14-9a64-4b41-8d98-5656de7f3bb9 | cpp | tensorflow/tensorflow | extract_outside_compilation_pass | tensorflow/compiler/jit/extract_outside_compilation_pass.cc | tensorflow/compiler/jit/extract_outside_compilation_pass_test.cc | #include "tensorflow/compiler/jit/extract_outside_compilation_pass.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/compiler/jit/encapsulate_util.h"
#include "tensorflow/compiler/tf2xla/side_effect_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
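// Control return mapping function for outside compilation host graphs: every
// node marked with the host-transfer attribute becomes a control return of
// the generated function.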
std::optional<string> HostGraphControlRetMapping(const Node* n) {
if (HasNodeAttr(n->def(), kXlaHasHostTransferAttrName)) {
return n->name();
}
return std::nullopt;
}
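// Adds a key placeholder node (a DT_STRING Placeholder) to `g` for the host
// compute channel of cluster `xla_cluster_name`.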
absl::StatusOr<Node*> AddHostComputeKeyPlaceholder(
const string& xla_cluster_name, Graph* g) {
NodeDef key_def;
NodeDefBuilder builder(absl::StrCat(xla_cluster_name, "_key_placeholder"),
"Placeholder");
builder.Attr("dtype", DT_STRING);
builder.Attr("shape", PartialTensorShape({2}));
builder.Attr("_host_compute_call_node", xla_cluster_name);
Status s = builder.Finalize(&key_def);
if (!s.ok()) return s;
Node* n = g->AddNode(key_def, &s);
if (!s.ok()) return s;
return n;
}
bool IsKeyPlaceholderNode(const Node& n) {
return n.type_string() == "Placeholder" &&
absl::EndsWith(n.name(), "_key_placeholder");
}
std::vector<Node*> GatherNodesWithType(const Graph& g, const string& type) {
std::vector<Node*> result;
for (Node* n : g.nodes()) {
if (n->type_string() == type) {
result.push_back(n);
}
}
return result;
}
Status GetArgDataTypes(const std::vector<Node*>& arg_nodes,
std::vector<DataType>* recv_at_host_dtypes) {
recv_at_host_dtypes->resize(arg_nodes.size(), DT_INVALID);
for (auto* n : arg_nodes) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "T", &dtype));
(*recv_at_host_dtypes)[index] = dtype;
}
for (int i = 0, end = recv_at_host_dtypes->size(); i < end; i++) {
if ((*recv_at_host_dtypes)[i] == DT_INVALID) {
return errors::Internal("Cannot get datatype for input ", i);
}
}
return absl::OkStatus();
}
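// Builds the _XlaRecvAtHost node that receives the cluster's inputs on the
// host side of the outside compilation channel.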
absl::StatusOr<Node*> BuildRecvAtHostNode(
Graph* g, const string& oc_cluster_name,
const std::vector<DataType>& recv_at_host_dtypes, Node* key_placeholder) {
NodeDefBuilder recv_at_host_builder(
absl::StrCat("outside_compilation_", oc_cluster_name, "_recv"),
"_XlaRecvAtHost");
NodeDef recv_at_host_def;
recv_at_host_builder.Attr("Toutputs", recv_at_host_dtypes);
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
recv_at_host_builder.Attr("device_ordinal", device_ordinal_value);
recv_at_host_builder.Attr(
"key", absl::StrCat("host_compute_channel_", oc_cluster_name));
recv_at_host_builder.Attr(kXlaHasHostTransferAttrName, true);
recv_at_host_builder.Input(key_placeholder->name(), 0, DT_STRING);
TF_RETURN_IF_ERROR(recv_at_host_builder.Finalize(&recv_at_host_def));
TF_ASSIGN_OR_RETURN(Node * recv_at_host_node, g->AddNode(recv_at_host_def));
return recv_at_host_node;
}
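// Replaces all _Arg nodes with a single _XlaRecvAtHost node; output `index`
// of the new node feeds every consumer of the _Arg with that index.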
absl::StatusOr<Node*> ReplaceArgNodesWithRecvAtHostNode(
Graph* g, const string& oc_cluster_name,
std::vector<DataType>* recv_at_host_dtypes, Node* key_placeholder) {
std::vector<Node*> arg_nodes = GatherNodesWithType(*g, "_Arg");
TF_RETURN_IF_ERROR(GetArgDataTypes(arg_nodes, recv_at_host_dtypes));
TF_ASSIGN_OR_RETURN(
Node * recv_at_host_node,
BuildRecvAtHostNode(g, oc_cluster_name, *recv_at_host_dtypes,
key_placeholder));
for (auto* n : arg_nodes) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
std::vector<OutEdgeInfo> out_edge_info;
out_edge_info.reserve(n->out_edges().size());
for (auto edge : n->out_edges()) {
out_edge_info.push_back(
{edge->dst(), edge->src_output(), edge->dst_input()});
}
g->RemoveNode(n);
for (const OutEdgeInfo& edge : out_edge_info) {
if (edge.dst_input == Graph::kControlSlot) {
g->AddControlEdge(recv_at_host_node, edge.dst);
} else {
g->AddEdge(recv_at_host_node, index, edge.dst, edge.dst_input);
}
}
for (int i = 0, end = out_edge_info.size(); i < end; i++) {
const OutEdgeInfo edge = out_edge_info[i];
if (edge.dst_input == Graph::kControlSlot) {
continue;
}
Node* dst = edge.dst;
NodeDef new_def = dst->def();
*new_def.mutable_input(edge.dst_input) =
absl::StrCat(recv_at_host_node->name(), ":", index);
TF_ASSIGN_OR_RETURN(Node * dst_replace, ReplaceNode(g, dst, new_def));
for (int j = i + 1, end = out_edge_info.size(); j < end; j++) {
if (out_edge_info[j].dst == dst) {
out_edge_info[j].dst = dst_replace;
}
}
}
}
g->AddEdge(key_placeholder, 0, recv_at_host_node, 0);
return recv_at_host_node;
}
Status GetRetDataTypes(const std::vector<Node*>& ret_nodes,
std::vector<DataType>* send_from_host_dtypes) {
send_from_host_dtypes->resize(ret_nodes.size(), DT_INVALID);
for (auto* n : ret_nodes) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "T", &dtype));
(*send_from_host_dtypes)[index] = dtype;
}
for (int i = 0, end = send_from_host_dtypes->size(); i < end; i++) {
if ((*send_from_host_dtypes)[i] == DT_INVALID) {
return errors::Internal("Cannot get datatype for output ", i);
}
}
return absl::OkStatus();
}
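// Builds the _XlaSendFromHost node that sends the cluster's outputs back from
// the host.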
absl::StatusOr<Node*> BuildSendFromHostNode(
Graph* g, const string& oc_cluster_name,
const std::vector<Node*>& ret_nodes,
const std::vector<DataType>& send_from_host_dtypes, Node* key_placeholder) {
NodeDefBuilder send_from_host_builder(
absl::StrCat("outside_compilation_", oc_cluster_name, "_send"),
"_XlaSendFromHost");
NodeDef send_from_host_def;
send_from_host_builder.Attr("Tinputs", send_from_host_dtypes);
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
send_from_host_builder.Attr("device_ordinal", device_ordinal_value);
send_from_host_builder.Attr(
"key", absl::StrCat("host_compute_channel_", oc_cluster_name));
send_from_host_builder.Attr(kXlaHasHostTransferAttrName, true);
std::vector<NodeDefBuilder::NodeOut> inputs(send_from_host_dtypes.size());
for (auto* n : ret_nodes) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
const int num_dtypes = send_from_host_dtypes.size();
if (index < 0 || index >= num_dtypes) {
return errors::Internal("Invalid _Retval index: ", index);
}
for (auto edge : n->in_edges()) {
inputs[index] =
NodeDefBuilder::NodeOut{edge->src()->name(), edge->src_output(),
edge->src()->output_type(edge->src_output())};
}
}
send_from_host_builder.Input(inputs);
send_from_host_builder.Input(key_placeholder->name(), 0, DT_STRING);
TF_RETURN_IF_ERROR(send_from_host_builder.Finalize(&send_from_host_def));
TF_ASSIGN_OR_RETURN(Node * send_from_host_node,
g->AddNode(send_from_host_def));
return send_from_host_node;
}
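// Replaces all _Retval nodes with a single _XlaSendFromHost node whose input
// `index` receives the value previously returned at that index.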
absl::StatusOr<Node*> ReplaceRetNodesWithSendFromHostNode(
Graph* g, const string& oc_cluster_name,
std::vector<DataType>* send_from_host_dtypes, Node* key_placeholder) {
std::vector<Node*> ret_nodes = GatherNodesWithType(*g, "_Retval");
TF_RETURN_IF_ERROR(GetRetDataTypes(ret_nodes, send_from_host_dtypes));
TF_ASSIGN_OR_RETURN(
Node * send_from_host_node,
BuildSendFromHostNode(g, oc_cluster_name, ret_nodes,
*send_from_host_dtypes, key_placeholder));
for (auto* n : ret_nodes) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
for (auto edge : n->in_edges()) {
if (edge->src_output() == Graph::kControlSlot) {
g->AddControlEdge(edge->src(), send_from_host_node);
} else {
g->AddEdge(edge->src(), edge->src_output(), send_from_host_node, index);
}
}
g->RemoveNode(n);
}
g->AddEdge(key_placeholder, 0, send_from_host_node,
send_from_host_dtypes->size());
return send_from_host_node;
}
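// Returns the statically inferred shapes of the send node's inputs if every
// one of them is fully defined, or nullopt otherwise.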
std::optional<std::vector<PartialTensorShape>> GetInferredInputShapes(
int num_inputs, Node* send_from_host_node) {
std::vector<PartialTensorShape> results(num_inputs);
for (int i = 0; i < num_inputs; i++) {
const Edge* e;
if (!send_from_host_node->input_edge(i, &e).ok()) {
return std::nullopt;
}
std::vector<PartialTensorShape> shapes;
if (!GetNodeAttr(e->src()->attrs(), kXlaInferredShapesAttrName, &shapes)
.ok()) {
return std::nullopt;
}
const PartialTensorShape shape = shapes[e->src_output()];
if (!shape.IsFullyDefined()) {
return std::nullopt;
}
results[e->dst_input()] = shape;
}
return results;
}
string host_compute_node_name(const string& original_oc_name) {
return absl::StrCat("outside_compilation_", original_oc_name,
"_host_compute");
}
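// Builds the NodeDef of the XlaHostCompute op that replaces an outside
// compilation call node, including token inputs for its cluster dependencies.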
absl::StatusOr<NodeDef> BuildXlaHostComputeNodeDef(
const Node* call_node, const std::map<string, int>& host_compute_core,
const absl::flat_hash_map<string, std::vector<string>>& cluster_deps) {
string original_oc_name;
TF_RETURN_IF_ERROR(GetNodeAttr(
call_node->attrs(), "_outside_compilation_subgraph", &original_oc_name));
NodeDefBuilder host_compute_builder(host_compute_node_name(original_oc_name),
"XlaHostCompute");
host_compute_builder.Attr(kXlaOriginalOutsideCompilationNodeName,
host_compute_builder.node_name());
for (const auto& attr : call_node->attrs()) {
host_compute_builder.Attr(attr.first, attr.second);
}
const auto iter = host_compute_core.find(original_oc_name);
if (iter != host_compute_core.end()) {
int core = iter->second;
host_compute_builder.Attr("tpu_core", core);
}
std::vector<string> xla_token_input_nodes;
xla_token_input_nodes.emplace_back(kXlaTokenArgNodeName);
auto cluster_deps_it = cluster_deps.find(original_oc_name);
if (cluster_deps_it != cluster_deps.end()) {
for (const auto& dep : cluster_deps_it->second) {
xla_token_input_nodes.emplace_back(host_compute_node_name(dep));
}
}
host_compute_builder.Attr(kXlaTokenInputNodesAttrName, xla_token_input_nodes);
std::vector<DataType> input_dtypes;
TF_RETURN_IF_ERROR(GetNodeAttr(call_node->attrs(), "Tinputs", &input_dtypes));
std::vector<NodeDefBuilder::NodeOut> inputs(input_dtypes.size());
for (auto e : call_node->in_edges()) {
if (e->IsControlEdge()) {
continue;
}
const int input_dtypes_size = input_dtypes.size();
if (e->dst_input() < 0 || e->dst_input() >= input_dtypes_size) {
return errors::Internal("Invalid dst_input: ", e->dst_input());
}
inputs[e->dst_input()] = NodeDefBuilder::NodeOut{
e->src()->name(), e->src_output(), input_dtypes[e->dst_input()]};
}
host_compute_builder.Input(inputs);
NodeDef new_def;
TF_RETURN_IF_ERROR(host_compute_builder.Finalize(&new_def));
return new_def;
}
TF_ATTRIBUTE_NOINLINE absl::StatusOr<Node*> ReplaceOutsideCompilationCallNode(
Graph* g, Node* call_node, const std::map<string, int>& host_compute_core,
const absl::flat_hash_map<string, std::vector<string>>& cluster_deps) {
TF_ASSIGN_OR_RETURN(
NodeDef node_def,
BuildXlaHostComputeNodeDef(call_node, host_compute_core, cluster_deps));
TF_ASSIGN_OR_RETURN(Node * host_compute_node,
ReplaceNode(g, call_node, node_def));
VLOG(4) << "Added HostCompute node: " << host_compute_node->DebugString();
return host_compute_node;
}
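// Resets the device ordinal attribute of every host-transfer node (and of the
// branch/body functions referenced by If and While nodes) to the
// "_device_ordinal" placeholder value.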
Status ResetDeviceOrdinalToPlaceholderValue(Graph* g) {
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
for (Node* n : g->nodes()) {
if (!HasNodeAttr(n->def(), kXlaHasHostTransferAttrName)) {
continue;
}
if (n->type_string() == "_XlaRecvAtHost" ||
n->type_string() == "_XlaSendFromHost") {
n->ClearAttr("device_ordinal");
n->AddAttr("device_ordinal", device_ordinal_value);
} else if (n->IsIfNode()) {
for (const string& attr_name :
std::vector<string>{"then_branch", "else_branch"}) {
NameAttrList branch_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), attr_name, &branch_func));
(*branch_func.mutable_attr())["_device_ordinal"] = device_ordinal_value;
n->ClearAttr(attr_name);
n->AddAttr(attr_name, branch_func);
}
} else if (n->IsWhileNode()) {
for (const string& attr_name : std::vector<string>{"cond", "body"}) {
NameAttrList branch_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), attr_name, &branch_func));
(*branch_func.mutable_attr())["_device_ordinal"] = device_ordinal_value;
n->ClearAttr(attr_name);
n->AddAttr(attr_name, branch_func);
}
} else if (HasNodeAttr(n->def(), "_device_ordinal")) {
n->ClearAttr("_device_ordinal");
n->AddAttr("_device_ordinal", device_ordinal_value);
} else {
return errors::Internal("Unknown node marked with ",
kXlaHasHostTransferAttrName, ": ",
n->DebugString());
}
}
return absl::OkStatus();
}
bool HasLiftedArgs(const FunctionDef& function_def) {
return absl::c_any_of(function_def.node_def(), [](const NodeDef& node_def) {
return (node_def.op() == "Placeholder" &&
node_def.attr().find(kXlaLiftedArgOutsideCompilationAttrName) !=
node_def.attr().end());
});
}
absl::StatusOr<std::vector<std::pair<Node*, Node*>>>
LiftedArgsAndOutsideCompilationNodesInFunctionBody(
const FunctionBody& function_body,
const std::unordered_map<string, Node*>& outside_compilation_attr_to_node) {
std::vector<std::pair<Node*, Node*>>
lifted_arg_nodes_and_outside_compilation_nodes;
for (Node* n : function_body.graph->op_nodes()) {
string oc_cluster;
if (n->type_string() == "Placeholder" &&
GetNodeAttr(n->def(), kXlaLiftedArgOutsideCompilationAttrName,
&oc_cluster)
.ok()) {
TF_RET_CHECK(outside_compilation_attr_to_node.find(oc_cluster) !=
outside_compilation_attr_to_node.end());
lifted_arg_nodes_and_outside_compilation_nodes.emplace_back(
n, outside_compilation_attr_to_node.at(oc_cluster));
}
}
return lifted_arg_nodes_and_outside_compilation_nodes;
}
absl::StatusOr<std::vector<DataType>> UpdateTypesAttribute(
const std::vector<std::pair<Node*, Node*>>&
lifted_arg_nodes_and_outside_compilation_nodes,
const string& type_attr_name, Node* n) {
std::vector<DataType> data_types;
data_types.reserve(lifted_arg_nodes_and_outside_compilation_nodes.size());
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), type_attr_name, &data_types));
for (auto pair : lifted_arg_nodes_and_outside_compilation_nodes) {
Node* outside_compilation_node = pair.second;
DataType data_type;
TF_RET_CHECK(outside_compilation_node->IsIdentity() ||
outside_compilation_node->type_string() == "Placeholder");
if (outside_compilation_node->IsIdentity()) {
TF_RETURN_IF_ERROR(
GetNodeAttr(outside_compilation_node->def(), "T", &data_type));
} else {
TF_RETURN_IF_ERROR(
GetNodeAttr(outside_compilation_node->def(), "dtype", &data_type));
}
data_types.push_back(data_type);
}
n->ClearAttr(type_attr_name);
n->AddAttr(type_attr_name, data_types);
return data_types;
}
void AddEdgesFromOutsideCompilationNodes(
const int original_arg_count, const int arg_to_input_edge_offset,
const std::vector<DataType>& data_types,
const std::vector<Node*>& outside_compilation_nodes, Graph* g, Node* n) {
for (int i = original_arg_count, end = data_types.size(); i < end; i++) {
Node* outside_compilation_node =
outside_compilation_nodes[i - original_arg_count];
g->AddEdge(outside_compilation_node, 0, n, i + arg_to_input_edge_offset);
}
}
absl::StatusOr<Node*> AddOutsideCompilationInputArgToFunctionBody(
const FunctionBody& function_body, const int arg_idx,
const DataType& data_type) {
NodeDefBuilder arg_builder(absl::StrCat("arg_", arg_idx), "_Arg");
arg_builder.Attr("T", data_type);
arg_builder.Attr("index", arg_idx);
NodeDef arg_def;
TF_RETURN_IF_ERROR(arg_builder.Finalize(&arg_def));
TF_ASSIGN_OR_RETURN(Node * arg_node, function_body.graph->AddNode(arg_def));
return arg_node;
}
Status AddMatchingRetvalNode(const FunctionBody& function_body,
const int arg_idx, const DataType& data_type,
Node* arg_node) {
NodeDefBuilder ret_builder(absl::StrCat("ret_", arg_idx), "_Retval");
ret_builder.Attr("T", data_type);
ret_builder.Attr("index", arg_idx);
ret_builder.Input(arg_node->name(), 0, data_type);
NodeDef ret_def;
TF_RETURN_IF_ERROR(ret_builder.Finalize(&ret_def));
TF_ASSIGN_OR_RETURN(Node * ret_node, function_body.graph->AddNode(ret_def));
function_body.graph->AddEdge(arg_node, 0, ret_node, 0);
return absl::OkStatus();
}
void ReplaceLiftedArgNodePlaceholderWithArg(
const FunctionBody& function_body, const int original_arg_count,
const int arg_idx, const std::vector<Node*>& lifted_arg_nodes,
Node* arg_node) {
Node* lifted_arg_node = lifted_arg_nodes[arg_idx - original_arg_count];
if (!lifted_arg_node) {
return;
}
for (const Edge* e : lifted_arg_node->out_edges()) {
if (e->IsControlEdge()) {
function_body.graph->AddControlEdge(arg_node, e->dst());
} else {
function_body.graph->AddEdge(arg_node, 0, e->dst(), e->dst_input());
}
}
function_body.graph->RemoveNode(lifted_arg_node);
}
Status AddFunctionWithNewName(const std::string& new_name,
const std::string& func_attr_name,
const FunctionDef& function_def,
NameAttrList* func_attr, Node* callsite_node,
FunctionLibraryDefinition* fld) {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(function_def));
func_attr->set_name(new_name);
callsite_node->ClearAttr(func_attr_name);
callsite_node->AddAttr(func_attr_name, *func_attr);
return absl::OkStatus();
}
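// Rewrites a While node whose body contains lifted-argument placeholders:
// matching _Arg/_Retval pairs are appended to the body (and _Arg nodes to the
// cond), the node's "T" attribute is extended, and edges from the
// corresponding outside compilation nodes are added to the While node.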
Status PostprocessLiftedArgsForWhile(
const std::unordered_map<string, Node*>& outside_compilation_attr_to_node,
Graph* g, Node* n, FunctionLibraryDefinition* fld) {
TF_RET_CHECK(n->IsWhileNode());
NameAttrList body_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "body", &body_func));
const FunctionDef* body_function_def = fld->Find(body_func.name());
TF_RET_CHECK(body_function_def);
if (!HasLiftedArgs(*body_function_def)) {
return absl::OkStatus();
}
std::unique_ptr<FunctionBody> body_function_body;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*body_function_def,
AttrSlice(&body_func.attr()), fld,
&body_function_body));
int original_arg_count = body_function_body->arg_nodes.size();
TF_ASSIGN_OR_RETURN(
auto lifted_arg_nodes_and_outside_compilation_nodes,
LiftedArgsAndOutsideCompilationNodesInFunctionBody(
*body_function_body, outside_compilation_attr_to_node));
TF_ASSIGN_OR_RETURN(
std::vector<DataType> data_types,
UpdateTypesAttribute(lifted_arg_nodes_and_outside_compilation_nodes, "T",
n));
std::vector<Node*> outside_compilation_nodes;
outside_compilation_nodes.reserve(
lifted_arg_nodes_and_outside_compilation_nodes.size());
std::transform(
lifted_arg_nodes_and_outside_compilation_nodes.begin(),
lifted_arg_nodes_and_outside_compilation_nodes.end(),
std::back_inserter(outside_compilation_nodes),
[](const std::pair<Node*, Node*>& pair) { return pair.second; });
  AddEdgesFromOutsideCompilationNodes(original_arg_count,
                                      /*arg_to_input_edge_offset=*/0,
                                      data_types, outside_compilation_nodes, g,
                                      n);
std::vector<Node*> lifted_arg_nodes;
lifted_arg_nodes.reserve(
lifted_arg_nodes_and_outside_compilation_nodes.size());
std::transform(
lifted_arg_nodes_and_outside_compilation_nodes.begin(),
lifted_arg_nodes_and_outside_compilation_nodes.end(),
std::back_inserter(lifted_arg_nodes),
[](const std::pair<Node*, Node*>& pair) { return pair.first; });
for (int i = original_arg_count, end = data_types.size(); i < end; i++) {
TF_ASSIGN_OR_RETURN(Node * arg_node,
AddOutsideCompilationInputArgToFunctionBody(
*body_function_body, i, data_types[i]));
TF_RETURN_IF_ERROR(
AddMatchingRetvalNode(*body_function_body, i, data_types[i], arg_node));
ReplaceLiftedArgNodePlaceholderWithArg(
*body_function_body, original_arg_count, i, lifted_arg_nodes, arg_node);
}
const auto new_body_function_name =
fld->UniqueFunctionName(absl::StrCat(body_func.name(), "_lifted_arg_"));
FunctionDef rewritten_body_function_def;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*body_function_body->graph, new_body_function_name,
HostGraphControlRetMapping, &rewritten_body_function_def));
TF_RETURN_IF_ERROR(AddFunctionWithNewName(new_body_function_name, "body",
rewritten_body_function_def,
&body_func, n, fld));
NameAttrList cond_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "cond", &cond_func));
const FunctionDef* cond_function_def = fld->Find(cond_func.name());
TF_RET_CHECK(cond_function_def);
std::unique_ptr<FunctionBody> cond_function_body;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*cond_function_def,
AttrSlice(&cond_func.attr()), fld,
&cond_function_body));
for (int i = original_arg_count, end = data_types.size(); i < end; i++) {
absl::StatusOr<Node*> arg_node_or =
AddOutsideCompilationInputArgToFunctionBody(*cond_function_body, i,
data_types[i]);
TF_RETURN_IF_ERROR(arg_node_or.status());
}
const auto new_cond_function_name =
fld->UniqueFunctionName(absl::StrCat(cond_func.name(), "_lifted_arg_"));
FunctionDef rewritten_cond_function_def;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*cond_function_body->graph, new_cond_function_name,
HostGraphControlRetMapping, &rewritten_cond_function_def));
TF_RETURN_IF_ERROR(AddFunctionWithNewName(new_cond_function_name, "cond",
rewritten_cond_function_def,
&cond_func, n, fld));
return absl::OkStatus();
}
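// Post-processes lifted arguments for an If node: collects the union of
// outside compilation values referenced by either branch, extends "Tin" and
// the node's inputs accordingly, and adds corresponding _Arg nodes to both
// branch functions before re-registering them under new names.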
Status PostprocessLiftedArgsForIf(
const std::unordered_map<string, Node*>& outside_compilation_attr_to_node,
Graph* g, Node* n, FunctionLibraryDefinition* fld) {
TF_RET_CHECK(n->IsIfNode());
NameAttrList then_branch_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "then_branch", &then_branch_func));
const FunctionDef* then_branch_function_def =
fld->Find(then_branch_func.name());
TF_RET_CHECK(then_branch_function_def);
NameAttrList else_branch_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "else_branch", &else_branch_func));
const FunctionDef* else_branch_function_def =
fld->Find(else_branch_func.name());
TF_RET_CHECK(else_branch_function_def);
if (!HasLiftedArgs(*then_branch_function_def) &&
!HasLiftedArgs(*else_branch_function_def)) {
return absl::OkStatus();
}
std::unique_ptr<FunctionBody> then_branch_function_body;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*then_branch_function_def, AttrSlice(&then_branch_func.attr()), fld,
&then_branch_function_body));
std::unique_ptr<FunctionBody> else_branch_function_body;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*else_branch_function_def, AttrSlice(&else_branch_func.attr()), fld,
&else_branch_function_body));
int original_arg_count = then_branch_function_body->arg_nodes.size();
TF_ASSIGN_OR_RETURN(
auto then_branch_lifted_arg_nodes_and_outside_compilation_nodes,
LiftedArgsAndOutsideCompilationNodesInFunctionBody(
*then_branch_function_body, outside_compilation_attr_to_node));
TF_ASSIGN_OR_RETURN(
auto else_branch_lifted_arg_nodes_and_outside_compilation_nodes,
LiftedArgsAndOutsideCompilationNodesInFunctionBody(
*else_branch_function_body, outside_compilation_attr_to_node));
std::vector<Node*> outside_compilation_nodes;
std::vector<Node*> then_branch_lifted_arg_nodes;
outside_compilation_nodes.reserve(
then_branch_lifted_arg_nodes_and_outside_compilation_nodes.size());
then_branch_lifted_arg_nodes.reserve(
then_branch_lifted_arg_nodes_and_outside_compilation_nodes.size());
for (const auto& pair :
then_branch_lifted_arg_nodes_and_outside_compilation_nodes) {
outside_compilation_nodes.push_back(pair.second);
then_branch_lifted_arg_nodes.push_back(pair.first);
}
for (const auto& pair :
else_branch_lifted_arg_nodes_and_outside_compilation_nodes) {
if (std::find(outside_compilation_nodes.begin(),
outside_compilation_nodes.end(),
pair.second) == outside_compilation_nodes.end()) {
outside_compilation_nodes.push_back(pair.second);
then_branch_lifted_arg_nodes.push_back(nullptr);
}
}
std::vector<Node*> else_branch_lifted_arg_nodes(
outside_compilation_nodes.size());
for (const auto& pair :
else_branch_lifted_arg_nodes_and_outside_compilation_nodes) {
auto iter = std::find(outside_compilation_nodes.begin(),
outside_compilation_nodes.end(), pair.second);
TF_RET_CHECK(iter != outside_compilation_nodes.end());
int index = iter - outside_compilation_nodes.begin();
else_branch_lifted_arg_nodes[index] = pair.first;
}
std::vector<DataType> data_types;
data_types.reserve(outside_compilation_nodes.size());
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "Tin", &data_types));
for (Node* n : outside_compilation_nodes) {
data_types.push_back(n->output_type(0));
}
n->ClearAttr("Tin");
n->AddAttr("Tin", data_types);
  // Offset 1: input 0 of the If node is the predicate, so branch arg i maps
  // to If node input i + 1.
  AddEdgesFromOutsideCompilationNodes(original_arg_count, 1, data_types,
                                      outside_compilation_nodes, g, n);
for (int i = original_arg_count, end = data_types.size(); i < end; ++i) {
TF_ASSIGN_OR_RETURN(Node * then_branch_arg_node,
AddOutsideCompilationInputArgToFunctionBody(
*then_branch_function_body, i, data_types[i]));
ReplaceLiftedArgNodePlaceholderWithArg(
*then_branch_function_body, original_arg_count, i,
then_branch_lifted_arg_nodes, then_branch_arg_node);
TF_ASSIGN_OR_RETURN(Node * else_branch_arg_node,
AddOutsideCompilationInputArgToFunctionBody(
*else_branch_function_body, i, data_types[i]));
ReplaceLiftedArgNodePlaceholderWithArg(
*else_branch_function_body, original_arg_count, i,
else_branch_lifted_arg_nodes, else_branch_arg_node);
}
const auto new_then_function_name = fld->UniqueFunctionName(
absl::StrCat(then_branch_func.name(), "_lifted_arg_"));
FunctionDef rewritten_then_branch_function_def;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*then_branch_function_body->graph, new_then_function_name,
HostGraphControlRetMapping, &rewritten_then_branch_function_def));
TF_RETURN_IF_ERROR(AddFunctionWithNewName(
new_then_function_name, "then_branch", rewritten_then_branch_function_def,
&then_branch_func, n, fld));
const auto new_else_function_name = fld->UniqueFunctionName(
absl::StrCat(else_branch_func.name(), "_lifted_arg_"));
FunctionDef rewritten_else_branch_function_def;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*else_branch_function_body->graph, new_else_function_name,
HostGraphControlRetMapping, &rewritten_else_branch_function_def));
TF_RETURN_IF_ERROR(AddFunctionWithNewName(
new_else_function_name, "else_branch", rewritten_else_branch_function_def,
&else_branch_func, n, fld));
return absl::OkStatus();
}
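// Post-processes lifted arguments for a function call node: appends the
// outside compilation values as extra inputs, adds matching _Arg nodes to the
// called function, registers the rewritten function under a new name, and
// replaces the call node so it invokes that new function.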
Status PostprocessLiftedArgsForCall(
const std::unordered_map<string, Node*>& outside_compilation_attr_to_node,
Graph* g, Node* n, FunctionLibraryDefinition* fld) {
const FunctionDef* fdef = fld->Find(n->type_string());
TF_RET_CHECK(fdef);
if (!HasLiftedArgs(*fdef)) {
return absl::OkStatus();
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*fdef, n->attrs(), fld, &fbody));
int original_arg_count = fbody->arg_nodes.size();
TF_ASSIGN_OR_RETURN(auto lifted_arg_nodes_and_outside_compilation_nodes,
LiftedArgsAndOutsideCompilationNodesInFunctionBody(
*fbody, outside_compilation_attr_to_node));
std::vector<DataType> data_types(n->input_types().begin(),
n->input_types().end());
for (auto pair : lifted_arg_nodes_and_outside_compilation_nodes) {
Node* outside_compilation_node = pair.second;
DataType data_type;
TF_RET_CHECK(outside_compilation_node->IsIdentity() ||
outside_compilation_node->type_string() == "Placeholder");
if (outside_compilation_node->IsIdentity()) {
TF_RETURN_IF_ERROR(
GetNodeAttr(outside_compilation_node->def(), "T", &data_type));
} else {
TF_RETURN_IF_ERROR(
GetNodeAttr(outside_compilation_node->def(), "dtype", &data_type));
}
data_types.push_back(data_type);
}
std::vector<Node*> lifted_arg_nodes;
lifted_arg_nodes.reserve(
lifted_arg_nodes_and_outside_compilation_nodes.size());
std::transform(
lifted_arg_nodes_and_outside_compilation_nodes.begin(),
lifted_arg_nodes_and_outside_compilation_nodes.end(),
std::back_inserter(lifted_arg_nodes),
[](const std::pair<Node*, Node*>& pair) { return pair.first; });
for (int i = original_arg_count, end = data_types.size(); i < end; ++i) {
TF_ASSIGN_OR_RETURN(
Node * arg_node,
AddOutsideCompilationInputArgToFunctionBody(*fbody, i, data_types[i]));
ReplaceLiftedArgNodePlaceholderWithArg(*fbody, original_arg_count, i,
lifted_arg_nodes, arg_node);
}
FunctionDef rewritten_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*fbody->graph, n->type_string(),
HostGraphControlRetMapping,
&rewritten_fdef));
const auto new_function_name =
fld->UniqueFunctionName(absl::StrCat(n->type_string(), "_lifted_arg_"));
rewritten_fdef.mutable_signature()->set_name(new_function_name);
TF_RETURN_IF_ERROR(fld->AddFunctionDef(rewritten_fdef));
NodeDef node_def = n->def();
*node_def.mutable_op() = new_function_name;
for (int i = original_arg_count, end = data_types.size(); i < end; i++) {
Node* outside_compilation_node =
lifted_arg_nodes_and_outside_compilation_nodes[i - original_arg_count]
.second;
node_def.add_input(absl::StrCat(outside_compilation_node->name(), ":", 0));
}
TF_ASSIGN_OR_RETURN(n, ReplaceNode(g, n, node_def));
std::vector<Node*> outside_compilation_nodes;
outside_compilation_nodes.reserve(
lifted_arg_nodes_and_outside_compilation_nodes.size());
std::transform(
lifted_arg_nodes_and_outside_compilation_nodes.begin(),
lifted_arg_nodes_and_outside_compilation_nodes.end(),
std::back_inserter(outside_compilation_nodes),
[](const std::pair<Node*, Node*>& pair) { return pair.second; });
  // Offset 0: the call node's inputs align 1:1 with the function args.
  AddEdgesFromOutsideCompilationNodes(original_arg_count, 0, data_types,
                                      outside_compilation_nodes, g, n);
return absl::OkStatus();
}
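// Builds a map from outside compilation cluster name to the lifted-arg
// placeholder node (an Identity or Placeholder carrying both the lifted-arg
// and the "_xla_outside_compilation" attributes) found in `g`.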
absl::StatusOr<std::unordered_map<string, Node*>> OutsideCompilationAttrToNode(
const Graph& g) {
std::unordered_map<string, Node*> outside_compilation_attr_to_node;
for (Node* n : g.op_nodes()) {
bool is_lifted_arg;
string outside_compilation_attr;
if (TryGetNodeAttr(n->def(), kXlaIsLiftedArgAttrName, &is_lifted_arg) &&
TryGetNodeAttr(n->def(), "_xla_outside_compilation",
&outside_compilation_attr)) {
TF_RET_CHECK(is_lifted_arg);
TF_RET_CHECK(n->IsIdentity() || n->type_string() == "Placeholder");
outside_compilation_attr_to_node[outside_compilation_attr] = n;
}
}
return outside_compilation_attr_to_node;
}
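// Entry point for lifted-arg post-processing: dispatches to the While / If /
// function-call handlers for every node in `g` marked with host transfers.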
Status PostprocessLiftedArgs(Graph* g, FunctionLibraryDefinition* fld) {
TF_ASSIGN_OR_RETURN(auto outside_compilation_attr_to_node,
OutsideCompilationAttrToNode(*g));
std::vector<Node*> call_nodes;
for (Node* n : g->op_nodes()) {
if (!HasNodeAttr(n->def(), kXlaHasHostTransferAttrName)) {
continue;
}
if (n->IsWhileNode()) {
TF_RETURN_IF_ERROR(PostprocessLiftedArgsForWhile(
outside_compilation_attr_to_node, g, n, fld));
}
if (n->IsIfNode()) {
TF_RETURN_IF_ERROR(PostprocessLiftedArgsForIf(
outside_compilation_attr_to_node, g, n, fld));
}
if (fld->Contains(n->type_string())) {
call_nodes.push_back(n);
}
}
for (Node* n : call_nodes) {
TF_RETURN_IF_ERROR(PostprocessLiftedArgsForCall(
outside_compilation_attr_to_node, g, n, fld));
}
return absl::OkStatus();
}
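// Builds the host-side graph for an XLA cluster by expanding each recorded
// outside compilation host function into a single graph, wiring every node
// that performs a host transfer to a "sequencer" NoOp, and pruning anything
// not reverse-reachable from the sink.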
Status ConstructHostGraph(
const string& xla_cluster_name, const string& outside_compilation_attr_name,
const std::vector<string>& outside_compilation_host_graphs,
FunctionLibraryDefinition* fld, std::unique_ptr<Graph>* host_graph) {
host_graph->reset(new Graph(fld));
NodeDefBuilder sequencer_builder(absl::StrCat(xla_cluster_name, "_sequencer"),
"NoOp");
sequencer_builder.Attr("_xla_host_transfer_sequencer", xla_cluster_name);
NodeDef sequencer_def;
TF_RETURN_IF_ERROR(sequencer_builder.Finalize(&sequencer_def));
TF_ASSIGN_OR_RETURN(Node * sequencer, (*host_graph)->AddNode(sequencer_def));
TF_ASSIGN_OR_RETURN(
Node * key_placeholder,
AddHostComputeKeyPlaceholder(xla_cluster_name, host_graph->get()));
for (const string& host_func : outside_compilation_host_graphs) {
VLOG(4) << "Expanding host graph " << host_func;
AttrValue device_ordinal_attr;
device_ordinal_attr.set_i(0);
protobuf::Map<string, AttrValue> attrs;
attrs["_device_ordinal"] = device_ordinal_attr;
std::unique_ptr<FunctionBody> host_fbody;
const FunctionDef* host_fdef = fld->Find(host_func);
TF_RET_CHECK(host_fdef);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*host_fdef, AttrSlice(&attrs),
fld, &host_fbody));
FixupSourceAndSinkEdges(host_fbody->graph);
std::map<const Node*, Node*> node_map;
node_map[host_fbody->graph->source_node()] = (*host_graph)->source_node();
node_map[host_fbody->graph->sink_node()] = (*host_graph)->sink_node();
Status s;
ReverseDFS(
*host_fbody->graph, nullptr,
[&](const Node* n) {
if (!s.ok()) {
return;
}
Node* copy;
if (node_map.find(n) != node_map.end()) {
copy = node_map.at(n);
} else if (IsKeyPlaceholderNode(*n)) {
copy = key_placeholder;
node_map[n] = copy;
} else {
NodeDef copy_def = n->def();
copy_def.clear_device();
copy = (*host_graph)->AddNode(copy_def, &s);
if (!s.ok()) {
return;
}
node_map[n] = copy;
}
for (auto e : n->in_edges()) {
if (node_map.find(e->src()) == node_map.end()) {
s = errors::Internal("Cannot find node image for ",
e->src()->DebugString());
return;
}
(*host_graph)
->AddEdge(node_map[e->src()], e->src_output(), copy,
e->dst_input());
}
if (HasNodeAttr(copy->def(), kXlaHasHostTransferAttrName)) {
(*host_graph)->AddControlEdge(copy, sequencer);
}
},
NodeComparatorID());
if (!s.ok()) {
return s;
}
}
TF_RETURN_IF_ERROR(ResetDeviceOrdinalToPlaceholderValue(host_graph->get()));
if (!sequencer->in_edges().empty()) {
(*host_graph)->AddControlEdge(sequencer, (*host_graph)->sink_node());
}
PruneForReverseReachability(
host_graph->get(),
std::unordered_set<const Node*>{(*host_graph)->sink_node()});
TF_RETURN_IF_ERROR(PostprocessEdgesBetweenOutsideCompilations(
host_graph->get(), outside_compilation_attr_name));
TF_RETURN_IF_ERROR(PostprocessLiftedArgs(host_graph->get(), fld));
if (VLOG_IS_ON(4)) {
DumpGraphToFile(absl::StrCat("extract_outside_compilation_host_graph_for_",
xla_cluster_name),
**host_graph, fld);
}
return absl::OkStatus();
}
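// Copies the host graph function `host_graph_func_name` into `main_graph`,
// anchoring its source at `pivot_node` when one is provided and adding a
// control edge from the sequencer NoOp to the XLA computation node.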
Status ExpandHostGraphIntoMainGraph(Graph* main_graph,
FunctionLibraryDefinition* fld,
const string& host_graph_func_name,
Node* xla_computation_node,
Node* pivot_node) {
AttrValue device_ordinal_attr;
device_ordinal_attr.set_i(0);
protobuf::Map<string, AttrValue> attrs;
attrs["_device_ordinal"] = device_ordinal_attr;
std::unique_ptr<FunctionBody> fbody;
const FunctionDef* host_graph_func = fld->Find(host_graph_func_name);
TF_RET_CHECK(host_graph_func);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*host_graph_func,
AttrSlice(&attrs), fld, &fbody));
Graph* host_graph = fbody->graph;
FixupSourceAndSinkEdges(host_graph);
std::map<const Node*, Node*> node_map;
if (pivot_node) {
node_map[host_graph->source_node()] = pivot_node;
} else {
node_map[host_graph->source_node()] = main_graph->source_node();
}
node_map[host_graph->sink_node()] = main_graph->sink_node();
Status s = absl::OkStatus();
auto copy_node_fn = [&](const Node* n) {
if (!s.ok()) {
return;
}
Node* copy;
if (node_map.find(n) != node_map.end()) {
copy = node_map.at(n);
} else {
NodeDef copy_def = n->def();
copy = main_graph->AddNode(copy_def, &s);
if (!s.ok()) {
return;
}
node_map[n] = copy;
}
for (auto e : n->in_edges()) {
if (node_map.find(e->src()) == node_map.end()) {
s = errors::Internal("Cannot find node image for ",
e->src()->DebugString());
return;
}
main_graph->AddEdge(node_map[e->src()], e->src_output(), copy,
e->dst_input());
}
if (copy->type_string() == "NoOp" &&
HasNodeAttr(copy->def(), "_xla_host_transfer_sequencer")) {
main_graph->AddControlEdge(copy, xla_computation_node);
}
};
ReverseDFS(*host_graph, nullptr, copy_node_fn, NodeComparatorID());
return s;
}
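// Rewrites a shape inference graph so that it contains only the
// _XlaSendFromHost node and its transitive inputs. If the send node also
// exists in `host_graph`, the graph is rebuilt by copying that subgraph from
// the host graph; the rewritten function then replaces the original in `fld`.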
Status RewriteShapeInferenceGraph(const string& shape_inference_graph_name,
Graph* host_graph, Node* pivot_node,
FunctionLibraryDefinition* fld) {
AttrValue device_ordinal_attr;
device_ordinal_attr.set_i(0);
protobuf::Map<string, AttrValue> attrs;
attrs["_device_ordinal"] = device_ordinal_attr;
std::unique_ptr<FunctionBody> fbody;
const FunctionDef* shape_inference_graph =
fld->Find(shape_inference_graph_name);
TF_RET_CHECK(shape_inference_graph);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*shape_inference_graph,
AttrSlice(&attrs), fld, &fbody));
Graph* g = fbody->graph;
Node* send_from_host = nullptr;
for (Node* n : g->nodes()) {
if (n->type_string() == "_XlaSendFromHost") {
send_from_host = n;
break;
}
}
if (!send_from_host) {
return errors::Internal("Shape inference graph ",
shape_inference_graph_name,
" does not have _XlaSendFromHost node.");
}
Node* send_node_in_host_graph = nullptr;
for (Node* n : host_graph->nodes()) {
if (n->name() == send_from_host->name()) {
send_node_in_host_graph = n;
break;
}
}
if (send_node_in_host_graph) {
std::vector<Node*> nodes;
nodes.reserve(g->num_op_nodes());
for (Node* n : g->op_nodes()) {
nodes.push_back(n);
}
for (Node* n : nodes) {
g->RemoveNode(n);
}
Node* start_node = pivot_node ? pivot_node : host_graph->source_node();
struct Visit {
Node* n;
bool is_exiting;
};
std::vector<Visit> stack{{send_node_in_host_graph, false}};
std::map<Node*, Node*> node_map;
node_map[host_graph->source_node()] = g->source_node();
while (!stack.empty()) {
Visit& curr = stack.back();
if (curr.is_exiting) {
if (node_map.find(curr.n) == node_map.end()) {
Node* copy = g->CopyNode(curr.n);
if (curr.n != start_node) {
for (const Edge* e : curr.n->in_edges()) {
auto node_iter = node_map.find(e->src());
if (node_iter == node_map.end()) {
return errors::Internal("Cannot find node image for ",
e->src()->DebugString());
}
g->AddEdge(node_iter->second, e->src_output(), copy,
e->dst_input());
}
}
node_map[curr.n] = copy;
}
stack.pop_back();
} else {
curr.is_exiting = true;
if (curr.n != start_node) {
for (const Edge* e : curr.n->in_edges()) {
if (node_map.find(e->src()) != node_map.end()) {
continue;
}
stack.push_back({e->src(), false});
}
}
}
}
send_from_host = node_map[send_node_in_host_graph];
  } else {
    // The send node is not present in the host graph, which means this shape
    // inference graph already contains the _XlaSendFromHost node together
    // with its inputs; nothing needs to be copied.
  }
for (auto e : g->edges()) {
if (e->IsControlEdge()) {
g->RemoveEdge(e);
}
}
PruneForReverseReachability(g,
std::unordered_set<const Node*>{send_from_host});
if (VLOG_IS_ON(4)) {
DumpGraphToFile(shape_inference_graph_name, *g, fld);
}
FunctionDef fdef_replace;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*g, shape_inference_graph_name, &fdef_replace));
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(shape_inference_graph_name, fdef_replace));
return absl::OkStatus();
}
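// Stamps a maximal sharding (single device, core 0) on the node being built.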
void SetMaximalSharding(NodeDefBuilder& node_builder) {
xla::OpSharding sharding;
sharding.set_type(xla::OpSharding::MAXIMAL);
sharding.add_tile_assignment_dimensions(1);
sharding.add_tile_assignment_devices(0);
node_builder.Attr("_XlaSharding", sharding.SerializeAsString());
}
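// Builds an XlaSendToHost node that sends the If predicate to the host under
// `host_transfer_key`, and wires it to `pred_node`.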
TF_ATTRIBUTE_NOINLINE absl::StatusOr<Node*> BuildSendIfPredNode(
const string& name, const string& host_transfer_key, Node* pred_node,
Graph* g) {
NodeDefBuilder send_pred_builder(name, "XlaSendToHost");
send_pred_builder.Attr("Tinput", DT_BOOL);
send_pred_builder.Attr("key", absl::StrCat(host_transfer_key, "_dtoh_0"));
send_pred_builder.Attr(kXlaTokenInputNodesAttrName,
std::vector<string>{kXlaTokenArgNodeName});
send_pred_builder.Attr(kXlaOriginalOutsideCompilationNodeName, name);
SetMaximalSharding(send_pred_builder);
send_pred_builder.Input(pred_node->name(), 0, DT_BOOL);
NodeDef send_pred_def;
TF_RETURN_IF_ERROR(send_pred_builder.Finalize(&send_pred_def));
TF_ASSIGN_OR_RETURN(Node * send_pred_node, g->AddNode(send_pred_def));
g->AddEdge(pred_node, 0, send_pred_node, 0);
return send_pred_node;
}
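// Replaces the host compute key placeholder in function `func_name` with a
// DT_STRING _Arg node at index 0, so the key can be passed in as a function
// argument, and updates the function in `fld`.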
Status ReplaceKeyPlaceholderWithArgNode(const string& xla_cluster_name,
const string& func_name,
FunctionLibraryDefinition* fld) {
AttrValue device_ordinal_attr;
device_ordinal_attr.set_i(0);
protobuf::Map<string, AttrValue> attrs;
attrs["_device_ordinal"] = device_ordinal_attr;
std::unique_ptr<FunctionBody> fbody;
const FunctionDef* func = fld->Find(func_name);
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(*func, AttrSlice(&attrs), fld, &fbody));
Graph* g = fbody->graph;
Node* key_placeholder = nullptr;
for (Node* n : g->nodes()) {
if (IsKeyPlaceholderNode(*n)) {
key_placeholder = n;
break;
}
}
if (!key_placeholder) {
TF_ASSIGN_OR_RETURN(key_placeholder,
AddHostComputeKeyPlaceholder(xla_cluster_name, g));
}
NodeDefBuilder arg_builder("key_arg", FunctionLibraryDefinition::kArgOp);
arg_builder.Attr("T", DT_STRING);
arg_builder.Attr("index", 0);
NodeDef arg_def;
TF_RETURN_IF_ERROR(arg_builder.Finalize(&arg_def));
TF_RETURN_IF_ERROR(ReplaceNode(g, key_placeholder, arg_def).status());
TF_RETURN_IF_ERROR(ResetDeviceOrdinalToPlaceholderValue(g));
FunctionDef replace_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*g, func_name, HostGraphControlRetMapping, &replace_fdef));
TF_RETURN_IF_ERROR(fld->ReplaceFunction(func_name, replace_fdef));
return absl::OkStatus();
}
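// Builds the host graph for an If node: a _XlaRecvAtHost node receives the
// predicate, which drives a host-side "If" whose branches are the rewritten
// then/else host functions; the result is stored as `host_graph_func_name`.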
TF_ATTRIBUTE_NOINLINE Status BuildHostGraphForIfNode(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const string& if_node_name, const string& host_transfer_key,
const string& host_graph_func_name, FunctionLibraryDefinition* fld,
const string& then_branch_host_func_name,
const string& else_branch_host_func_name) {
Graph host_graph(fld);
string outside_compilation_name = absl::StrCat("oc_if_", if_node_name);
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
TF_ASSIGN_OR_RETURN(
Node * key_placeholder,
AddHostComputeKeyPlaceholder(xla_cluster_name, &host_graph));
NodeDefBuilder recv_pred_builder(
absl::StrCat("recv_oc_if_pred_", if_node_name), "_XlaRecvAtHost");
recv_pred_builder.Attr("Toutputs", std::vector<DataType>{DT_BOOL});
recv_pred_builder.Attr("key", host_transfer_key);
recv_pred_builder.Attr("device_ordinal", device_ordinal_value);
recv_pred_builder.Attr(xla_cluster_attr_name, xla_cluster_name);
recv_pred_builder.Attr(outside_compilation_attr_name,
outside_compilation_name);
recv_pred_builder.Attr(kXlaHasHostTransferAttrName, true);
recv_pred_builder.Input(key_placeholder->name(), 0, DT_STRING);
NodeDef recv_pred_def;
TF_RETURN_IF_ERROR(recv_pred_builder.Finalize(&recv_pred_def));
TF_ASSIGN_OR_RETURN(Node * recv_pred_node, host_graph.AddNode(recv_pred_def));
host_graph.AddEdge(key_placeholder, 0, recv_pred_node, 0);
TF_RETURN_IF_ERROR(ReplaceKeyPlaceholderWithArgNode(
xla_cluster_name, then_branch_host_func_name, fld));
TF_RETURN_IF_ERROR(ReplaceKeyPlaceholderWithArgNode(
xla_cluster_name, else_branch_host_func_name, fld));
NodeDefBuilder if_builder(absl::StrCat("oc_if_", if_node_name), "If");
if_builder.Attr("Tcond", DT_BOOL);
if_builder.Attr("Tin", std::vector<DataType>{DT_STRING});
if_builder.Attr("Tout", std::vector<DataType>{});
NameAttrList host_then_branch, host_else_branch;
host_then_branch.set_name(then_branch_host_func_name);
(*host_then_branch.mutable_attr())["_device_ordinal"] = device_ordinal_value;
host_else_branch.set_name(else_branch_host_func_name);
(*host_else_branch.mutable_attr())["_device_ordinal"] = device_ordinal_value;
if_builder.Attr("then_branch", host_then_branch);
if_builder.Attr("else_branch", host_else_branch);
if_builder.Attr(kXlaHasHostTransferAttrName, true);
if_builder.Attr(xla_cluster_attr_name, xla_cluster_name);
if_builder.Attr(outside_compilation_attr_name, outside_compilation_name);
if_builder.Input(recv_pred_node->name(), 0, DT_BOOL);
std::vector<NodeDefBuilder::NodeOut> if_inputs{
{key_placeholder->name(), 0, DT_STRING}};
if_builder.Input(if_inputs);
NodeDef if_def;
TF_RETURN_IF_ERROR(if_builder.Finalize(&if_def));
TF_ASSIGN_OR_RETURN(Node * if_node, host_graph.AddNode(if_def));
host_graph.AddEdge(recv_pred_node, 0, if_node, 0);
host_graph.AddEdge(key_placeholder, 0, if_node, 1);
FunctionDef oc_host_graph_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(host_graph, host_graph_func_name,
&oc_host_graph_fdef));
if (fld->Find(host_graph_func_name)) {
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(host_graph_func_name, oc_host_graph_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(oc_host_graph_fdef));
}
return absl::OkStatus();
}
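// Adds an XlaSendToHost node to the While loop's cond function so the loop
// predicate is sent to the host on every iteration. The rewritten cond either
// replaces the existing function or is registered under a new name and wired
// into the While node's "cond" attribute.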
TF_ATTRIBUTE_NOINLINE Status AddSendLoopPredToLoopCond(
const string& cond_xla_func_name, const string& host_transfer_key,
NameAttrList* loop_cond_func, FunctionLibraryDefinition* fld,
Node* while_node) {
std::unique_ptr<FunctionBody> fbody;
const FunctionDef* loop_cond_fdef = fld->Find(loop_cond_func->name());
TF_RET_CHECK(loop_cond_fdef);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*loop_cond_fdef, AttrSlice(&loop_cond_func->attr()), fld, &fbody));
Graph* g = fbody->graph;
Node* ret_node = nullptr;
for (Node* n : g->nodes()) {
if (n->type_string() == "_Retval") {
if (ret_node) {
return errors::Internal("Multiple return node for loop cond function ",
loop_cond_func->name(), ": ",
ret_node->DebugString(), " and ",
n->DebugString());
} else {
ret_node = n;
}
}
}
if (!ret_node) {
return errors::Internal("No _Retval node for loop cond function ",
loop_cond_func->name());
}
Node* loop_cond;
TF_RETURN_IF_ERROR(ret_node->input_node(0, &loop_cond));
NodeDefBuilder send_loop_cond_builder(
absl::StrCat("send_oc_while_cond_", while_node->name()), "XlaSendToHost");
send_loop_cond_builder.Attr("Tinput", DT_BOOL);
send_loop_cond_builder.Attr("key",
absl::StrCat(host_transfer_key, "_dtoh_0"));
send_loop_cond_builder.Attr(kXlaTokenInputNodesAttrName,
std::vector<string>{kXlaTokenArgNodeName});
send_loop_cond_builder.Attr(kXlaOriginalOutsideCompilationNodeName,
send_loop_cond_builder.node_name());
SetMaximalSharding(send_loop_cond_builder);
send_loop_cond_builder.Input(loop_cond->name(), 0, DT_BOOL);
NodeDef send_loop_cond_def;
TF_RETURN_IF_ERROR(send_loop_cond_builder.Finalize(&send_loop_cond_def));
TF_ASSIGN_OR_RETURN(Node * send_loop_cond_node,
g->AddNode(send_loop_cond_def));
g->AddEdge(loop_cond, 0, send_loop_cond_node, 0);
FunctionDef replace_fdef;
if (loop_cond_func->name() == cond_xla_func_name) {
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*g, loop_cond_func->name(), &replace_fdef));
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(loop_cond_func->name(), replace_fdef));
} else {
const auto new_name = fld->UniqueFunctionName(
absl::StrCat(loop_cond_func->name(), "_send_pred_added_"));
TF_RETURN_IF_ERROR(GraphToFunctionDef(*g, new_name, &replace_fdef));
TF_RETURN_IF_ERROR(fld->AddFunctionDef(replace_fdef));
loop_cond_func->set_name(new_name);
while_node->ClearAttr("cond");
while_node->AddAttr("cond", *loop_cond_func);
}
return absl::OkStatus();
}
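// Rewrites the host-side While cond function: the key placeholder becomes a
// _Arg, a _XlaRecvAtHost node receives the loop predicate from the device,
// and its output becomes the cond function's boolean _Retval.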
Status RewriteHostWhileLoopCond(
const string& cond_host_func_name, const string& while_node_name,
const string& host_transfer_key, const string& xla_cluster_attr_name,
const string& xla_cluster_name, const string& outside_compilation_attr_name,
const string& outside_compilation_name, FunctionLibraryDefinition* fld) {
TF_RETURN_IF_ERROR(ReplaceKeyPlaceholderWithArgNode(
xla_cluster_name, cond_host_func_name, fld));
AttrValue device_ordinal_temp_value;
device_ordinal_temp_value.set_i(0);
protobuf::Map<string, AttrValue> attrs;
attrs["_device_ordinal"] = device_ordinal_temp_value;
std::unique_ptr<FunctionBody> cond_fbody;
const FunctionDef* cond_host_func = fld->Find(cond_host_func_name);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*cond_host_func, AttrSlice(&attrs),
fld, &cond_fbody));
Graph* cond_graph = cond_fbody->graph;
Node* key_arg = nullptr;
for (Node* n : cond_graph->nodes()) {
if (n->type_string() == "_Arg") {
key_arg = n;
}
}
if (!key_arg) {
return errors::Internal(
"No _Arg node found for host compute key in function ",
cond_host_func_name);
}
NodeDefBuilder recv_pred_builder(
absl::StrCat("recv_oc_while_cond_", while_node_name), "_XlaRecvAtHost");
recv_pred_builder.Attr("Toutputs", std::vector<DataType>{DT_BOOL});
recv_pred_builder.Attr("key", host_transfer_key);
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
recv_pred_builder.Attr("device_ordinal", device_ordinal_value);
recv_pred_builder.Attr(xla_cluster_attr_name, xla_cluster_name);
recv_pred_builder.Attr(outside_compilation_attr_name,
outside_compilation_name);
recv_pred_builder.Attr(kXlaHasHostTransferAttrName, true);
recv_pred_builder.Input(key_arg->name(), 0, DT_STRING);
NodeDef recv_pred_def;
TF_RETURN_IF_ERROR(recv_pred_builder.Finalize(&recv_pred_def));
TF_ASSIGN_OR_RETURN(Node * recv_pred_node,
cond_graph->AddNode(recv_pred_def));
cond_graph->AddEdge(key_arg, 0, recv_pred_node, 0);
NodeDefBuilder ret_builder(
absl::StrCat("recv_oc_while_cond_ret_", while_node_name), "_Retval");
ret_builder.Attr("T", DT_BOOL);
ret_builder.Attr("index", 0);
ret_builder.Input(recv_pred_node->name(), 0, DT_BOOL);
NodeDef ret_def;
TF_RETURN_IF_ERROR(ret_builder.Finalize(&ret_def));
TF_ASSIGN_OR_RETURN(Node * ret_node, cond_graph->AddNode(ret_def));
cond_graph->AddEdge(recv_pred_node, 0, ret_node, 0);
TF_RETURN_IF_ERROR(ResetDeviceOrdinalToPlaceholderValue(cond_graph));
FunctionDef cond_replace_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*cond_graph, cond_host_func_name,
HostGraphControlRetMapping,
&cond_replace_fdef));
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(cond_host_func_name, cond_replace_fdef));
return absl::OkStatus();
}
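// Rewrites the host-side While body function: the key placeholder becomes a
// _Arg, and the key is returned unchanged through a _Retval so the transfer
// key is threaded through each loop iteration.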
Status RewriteHostWhileLoopBody(
const string& body_host_func_name, const string& while_node_name,
const string& host_transfer_key, const string& xla_cluster_attr_name,
const string& xla_cluster_name, const string& outside_compilation_attr_name,
const string& outside_compilation_name, FunctionLibraryDefinition* fld) {
TF_RETURN_IF_ERROR(ReplaceKeyPlaceholderWithArgNode(
xla_cluster_name, body_host_func_name, fld));
AttrValue device_ordinal_temp_value;
device_ordinal_temp_value.set_i(0);
protobuf::Map<string, AttrValue> attrs;
attrs["_device_ordinal"] = device_ordinal_temp_value;
std::unique_ptr<FunctionBody> body_fbody;
const FunctionDef* body_host_func = fld->Find(body_host_func_name);
TF_RET_CHECK(body_host_func);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*body_host_func, AttrSlice(&attrs),
fld, &body_fbody));
Graph* body_graph = body_fbody->graph;
Node* key_arg = nullptr;
for (Node* n : body_graph->nodes()) {
if (n->type_string() == "_Arg") {
key_arg = n;
}
}
if (!key_arg) {
return errors::Internal(
"No _Arg node found for host compute key in function ",
body_host_func_name);
}
NodeDefBuilder ret_builder(
absl::StrCat("recv_oc_while_body_ret_", while_node_name), "_Retval");
ret_builder.Attr("T", DT_STRING);
ret_builder.Attr("index", 0);
ret_builder.Input(key_arg->name(), 0, DT_STRING);
NodeDef ret_def;
TF_RETURN_IF_ERROR(ret_builder.Finalize(&ret_def));
TF_ASSIGN_OR_RETURN(Node * ret_node, body_graph->AddNode(ret_def));
body_graph->AddEdge(key_arg, 0, ret_node, 0);
TF_RETURN_IF_ERROR(ResetDeviceOrdinalToPlaceholderValue(body_graph));
FunctionDef body_replace_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*body_graph, body_host_func_name,
HostGraphControlRetMapping,
&body_replace_fdef));
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(body_host_func_name, body_replace_fdef));
return absl::OkStatus();
}
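// Builds the host graph for a While node: rewrites the host cond/body
// functions, then creates a host-side "While" op (parallel_iterations = 1)
// driven by the host compute key placeholder, and stores the result as
// `host_graph_func_name`.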
TF_ATTRIBUTE_NOINLINE Status BuildHostGraphForWhileNode(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const string& while_node_name, const string& host_transfer_key,
const string& host_graph_func_name, FunctionLibraryDefinition* fld,
const string& cond_host_func_name, const string& body_host_func_name) {
Graph host_graph(fld);
string outside_compilation_name = absl::StrCat("oc_while_", while_node_name);
TF_ASSIGN_OR_RETURN(
Node * key_placeholder,
AddHostComputeKeyPlaceholder(xla_cluster_name, &host_graph));
TF_RETURN_IF_ERROR(RewriteHostWhileLoopCond(
cond_host_func_name, while_node_name, host_transfer_key,
xla_cluster_attr_name, xla_cluster_name, outside_compilation_attr_name,
outside_compilation_name, fld));
TF_RETURN_IF_ERROR(RewriteHostWhileLoopBody(
body_host_func_name, while_node_name, host_transfer_key,
xla_cluster_attr_name, xla_cluster_name, outside_compilation_attr_name,
outside_compilation_name, fld));
NodeDefBuilder while_builder(absl::StrCat("oc_while_", while_node_name),
"While");
while_builder.Attr("T", std::vector<DataType>{DT_STRING});
NameAttrList func;
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
(*func.mutable_attr())["_device_ordinal"] = device_ordinal_value;
func.set_name(cond_host_func_name);
while_builder.Attr("cond", func);
func.set_name(body_host_func_name);
while_builder.Attr("body", func);
while_builder.Attr(kXlaHasHostTransferAttrName, true);
while_builder.Attr(xla_cluster_attr_name, xla_cluster_name);
while_builder.Attr(outside_compilation_attr_name, outside_compilation_name);
while_builder.Attr("parallel_iterations", 1);
std::vector<NodeDefBuilder::NodeOut> while_inputs{
{key_placeholder->name(), 0, DT_STRING}};
while_builder.Input(while_inputs);
NodeDef while_def;
TF_RETURN_IF_ERROR(while_builder.Finalize(&while_def));
TF_ASSIGN_OR_RETURN(Node * while_node, host_graph.AddNode(while_def));
host_graph.AddEdge(key_placeholder, 0, while_node, 0);
FunctionDef oc_host_graph_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(host_graph, host_graph_func_name,
&oc_host_graph_fdef));
if (fld->Find(host_graph_func_name)) {
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(host_graph_func_name, oc_host_graph_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(oc_host_graph_fdef));
}
return absl::OkStatus();
}
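// Builds the host graph for a function call node: a single call to the
// rewritten host function, fed by the host compute key placeholder.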
Status BuildHostGraphForFuncCallNode(
const string& xla_cluster_attr_name, const string& xla_cluster_name,
const string& outside_compilation_attr_name,
const string& func_call_node_name, const string& func_call_host_func_name,
const string& host_graph_func_name, FunctionLibraryDefinition* fld) {
Graph host_graph(fld);
AttrValue device_ordinal_value;
device_ordinal_value.set_placeholder("_device_ordinal");
TF_ASSIGN_OR_RETURN(
Node * key_placeholder,
AddHostComputeKeyPlaceholder(xla_cluster_name, &host_graph));
TF_RETURN_IF_ERROR(ReplaceKeyPlaceholderWithArgNode(
xla_cluster_name, func_call_host_func_name, fld));
NodeDefBuilder call_builder(absl::StrCat("oc_call_", func_call_node_name),
func_call_host_func_name, fld);
call_builder.Input(key_placeholder->name(), 0, DT_STRING);
call_builder.Attr("_device_ordinal", device_ordinal_value);
call_builder.Attr(kXlaHasHostTransferAttrName, true);
call_builder.Attr(xla_cluster_attr_name, xla_cluster_name);
call_builder.Attr(outside_compilation_attr_name, call_builder.node_name());
NodeDef call_def;
TF_RETURN_IF_ERROR(call_builder.Finalize(&call_def));
TF_ASSIGN_OR_RETURN(Node * call_node, host_graph.AddNode(call_def));
host_graph.AddEdge(key_placeholder, 0, call_node, 0);
FunctionDef oc_host_graph_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(host_graph, host_graph_func_name,
HostGraphControlRetMapping,
&oc_host_graph_fdef));
if (fld->Find(host_graph_func_name)) {
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(host_graph_func_name, oc_host_graph_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(oc_host_graph_fdef));
}
return absl::OkStatus();
}
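// Extracts outside compilation from a function call node (direct call,
// PartitionedCall, or SymbolicGradient). The called function is rewritten as
// "<name>_oc"; if it contains outside compilation, the call node is replaced
// so it invokes the rewritten function and a host graph is recorded.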
TF_ATTRIBUTE_NOINLINE Status ExtractOutsideCompilationForFuncCallNode(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const std::map<string, int>& host_compute_core, Graph* g, Node* n,
FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld,
std::vector<string>* host_graphs,
std::vector<string>* shape_inference_graphs,
bool* has_outside_compilation) {
bool func_has_outside_compilation = false;
NameAttrList func;
if (fld->Contains(n->type_string())) {
func.set_name(n->type_string());
typedef protobuf::Map<string, AttrValue> AttrMap;
*func.mutable_attr() = AttrMap(n->attrs().begin(), n->attrs().end());
} else if (n->IsPartitionedCall()) {
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "f", &func));
} else {
TF_RET_CHECK(n->type_string() == FunctionLibraryDefinition::kGradientOp);
func.set_name(FunctionLibraryDefinition::kGradientOp);
*func.mutable_attr() = n->def().attr();
}
string canonical_func_name;
if (func.name() == FunctionLibraryDefinition::kGradientOp) {
NameAttrList forward_func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "f", &forward_func));
canonical_func_name = absl::StrCat("gradient_", forward_func.name());
} else {
canonical_func_name = func.name();
}
string new_func_name = absl::StrCat(canonical_func_name, "_oc");
string host_func_name =
absl::StrCat("oc_func_call_host_", canonical_func_name);
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
func, new_func_name, host_func_name, host_compute_core, flr, fld,
shape_inference_graphs, &func_has_outside_compilation));
if (!func_has_outside_compilation) {
return absl::OkStatus();
}
*has_outside_compilation = true;
auto replace_builder =
std::make_unique<NodeDefBuilder>(n->name(), new_func_name, fld);
std::vector<NodeDefBuilder::NodeOut> inputs(n->num_inputs());
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
continue;
}
const bool input_size_check =
e->dst_input() < static_cast<int>(inputs.size());
TF_RET_CHECK(e->dst_input() >= 0 && input_size_check);
inputs[e->dst_input()] =
NodeDefBuilder::NodeOut{e->src()->name(), e->src_output(),
e->src()->output_type(e->src_output())};
}
for (const auto& input : inputs) {
replace_builder->Input(input);
}
for (const auto& attr : n->attrs()) {
replace_builder->Attr(attr.first, attr.second);
}
auto replace_def = std::make_unique<NodeDef>();
TF_RETURN_IF_ERROR(replace_builder->Finalize(replace_def.get()));
TF_ASSIGN_OR_RETURN(Node * replace, ReplaceNode(g, n, *replace_def));
replace->AddAttr(kXlaTokenInputNodesAttrName,
std::vector<string>{kXlaTokenArgNodeName});
replace->AddAttr(kXlaOriginalOutsideCompilationNodeName, replace->name());
string oc_host_graph_name =
absl::StrCat("oc_func_host_graph_", replace->name());
TF_RETURN_IF_ERROR(BuildHostGraphForFuncCallNode(
xla_cluster_attr_name, xla_cluster_name, outside_compilation_attr_name,
replace->name(), host_func_name, oc_host_graph_name, fld));
host_graphs->push_back(oc_host_graph_name);
return absl::OkStatus();
}
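// Extracts outside compilation from an If node. Both branch functions are
// rewritten; if either contains outside compilation, the predicate is sent to
// the host, empty host graphs are built for branches without outside
// compilation, and a host-side If graph is recorded.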
Status ExtractOutsideCompilationForIfNode(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const std::map<string, int>& host_compute_core, Graph* g, Node* n,
FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld,
std::vector<string>* host_graphs,
std::vector<string>* shape_inference_graphs,
bool* has_outside_compilation) {
NameAttrList then_branch, else_branch;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "then_branch", &then_branch));
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "else_branch", &else_branch));
bool then_branch_has_outside_compilation = false;
bool else_branch_has_outside_compilation = false;
string then_branch_host_func_name =
absl::StrCat("oc_then_branch_host_if_", then_branch.name()),
else_branch_host_func_name =
absl::StrCat("oc_else_branch_host_if_", else_branch.name());
string then_branch_xla_func_name = absl::StrCat(then_branch.name(), "_oc"),
else_branch_xla_func_name = absl::StrCat(else_branch.name(), "_oc");
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
then_branch, then_branch_xla_func_name, then_branch_host_func_name,
host_compute_core, flr, fld, shape_inference_graphs,
&then_branch_has_outside_compilation));
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
else_branch, else_branch_xla_func_name, else_branch_host_func_name,
host_compute_core, flr, fld, shape_inference_graphs,
&else_branch_has_outside_compilation));
if (!then_branch_has_outside_compilation &&
!else_branch_has_outside_compilation) {
return absl::OkStatus();
}
*has_outside_compilation = true;
if (then_branch_has_outside_compilation) {
then_branch.set_name(then_branch_xla_func_name);
n->ClearAttr("then_branch");
n->AddAttr("then_branch", then_branch);
}
if (else_branch_has_outside_compilation) {
else_branch.set_name(else_branch_xla_func_name);
n->ClearAttr("else_branch");
n->AddAttr("else_branch", else_branch);
}
n->AddAttr(kXlaOriginalOutsideCompilationNodeName, n->name());
string host_transfer_key = absl::StrCat("oc_if_pred_", n->name());
Node* pred_node;
TF_RETURN_IF_ERROR(n->input_node(0, &pred_node));
TF_ASSIGN_OR_RETURN(
Node * send_pred_node,
BuildSendIfPredNode(absl::StrCat("send_oc_if_pred_", n->name()),
host_transfer_key, pred_node, g));
n->AddAttr(kXlaTokenInputNodesAttrName,
std::vector<string>{send_pred_node->name()});
g->AddControlEdge(send_pred_node, n);
if (!then_branch_has_outside_compilation) {
std::unique_ptr<Graph> then_branch_host_graph(new Graph(fld));
std::vector<string> then_branch_host_graphs;
TF_RETURN_IF_ERROR(ConstructHostGraph(
xla_cluster_name, outside_compilation_attr_name,
then_branch_host_graphs, fld, &then_branch_host_graph));
FunctionDef then_branch_host_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*then_branch_host_graph,
then_branch_host_func_name,
&then_branch_host_fdef));
if (fld->Find(then_branch_host_func_name)) {
TF_RETURN_IF_ERROR(fld->ReplaceFunction(then_branch_host_func_name,
then_branch_host_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(then_branch_host_fdef));
}
}
if (!else_branch_has_outside_compilation) {
std::unique_ptr<Graph> else_branch_host_graph(new Graph(fld));
std::vector<string> else_branch_host_graphs;
TF_RETURN_IF_ERROR(ConstructHostGraph(
xla_cluster_name, outside_compilation_attr_name,
else_branch_host_graphs, fld, &else_branch_host_graph));
FunctionDef else_branch_host_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*else_branch_host_graph,
else_branch_host_func_name,
&else_branch_host_fdef));
if (fld->Find(else_branch_host_func_name)) {
TF_RETURN_IF_ERROR(fld->ReplaceFunction(else_branch_host_func_name,
else_branch_host_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(else_branch_host_fdef));
}
}
string oc_host_graph_name = absl::StrCat("oc_if_host_graph_", n->name());
TF_RETURN_IF_ERROR(BuildHostGraphForIfNode(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
n->name(), host_transfer_key, oc_host_graph_name, fld,
then_branch_host_func_name, else_branch_host_func_name));
host_graphs->push_back(oc_host_graph_name);
return absl::OkStatus();
}
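// Extracts outside compilation from a While node. The cond and body functions
// are rewritten; if either contains outside compilation, the loop predicate
// is sent to the host, missing host graphs are filled in, and a host-side
// While graph is recorded.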
Status ExtractOutsideCompilationForWhileNode(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const std::map<string, int>& host_compute_core, Graph* g, Node* n,
FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld,
std::vector<string>* host_graphs,
std::vector<string>* shape_inference_graphs,
bool* has_outside_compilation) {
NameAttrList cond, body;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "cond", &cond));
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "body", &body));
bool cond_has_outside_compilation = false;
bool body_has_outside_compilation = false;
string cond_host_func_name = absl::StrCat("oc_cond_host_while_", cond.name()),
body_host_func_name = absl::StrCat("oc_body_host_while_", body.name());
string cond_xla_func_name = absl::StrCat(cond.name(), "_oc"),
body_xla_func_name = absl::StrCat(body.name(), "_oc");
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
cond, cond_xla_func_name, cond_host_func_name, host_compute_core, flr,
fld, shape_inference_graphs, &cond_has_outside_compilation));
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
body, body_xla_func_name, body_host_func_name, host_compute_core, flr,
fld, shape_inference_graphs, &body_has_outside_compilation));
if (!cond_has_outside_compilation && !body_has_outside_compilation) {
return absl::OkStatus();
}
*has_outside_compilation = true;
if (cond_has_outside_compilation) {
cond.set_name(cond_xla_func_name);
n->ClearAttr("cond");
n->AddAttr("cond", cond);
}
if (body_has_outside_compilation) {
body.set_name(body_xla_func_name);
n->ClearAttr("body");
n->AddAttr("body", body);
}
n->AddAttr(kXlaOriginalOutsideCompilationNodeName, n->name());
string host_transfer_key = absl::StrCat("oc_while_pred_", n->name());
TF_RETURN_IF_ERROR(AddSendLoopPredToLoopCond(
cond_xla_func_name, host_transfer_key, &cond, fld, n));
n->AddAttr(kXlaTokenInputNodesAttrName,
std::vector<string>{kXlaTokenArgNodeName});
if (!cond_has_outside_compilation) {
std::unique_ptr<Graph> cond_host_graph(new Graph(fld));
std::vector<string> host_graphs;
TF_RETURN_IF_ERROR(ConstructHostGraph(xla_cluster_name,
outside_compilation_attr_name,
host_graphs, fld, &cond_host_graph));
FunctionDef cond_host_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*cond_host_graph, cond_host_func_name,
&cond_host_fdef));
if (fld->Find(cond_host_func_name)) {
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(cond_host_func_name, cond_host_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(cond_host_fdef));
}
}
if (!body_has_outside_compilation) {
std::unique_ptr<Graph> body_host_graph(new Graph(fld));
std::vector<string> host_graphs;
TF_RETURN_IF_ERROR(ConstructHostGraph(xla_cluster_name,
outside_compilation_attr_name,
host_graphs, fld, &body_host_graph));
FunctionDef body_host_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*body_host_graph, body_host_func_name,
&body_host_fdef));
if (fld->Find(body_host_func_name)) {
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(body_host_func_name, body_host_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(body_host_fdef));
}
}
string oc_host_graph_name = absl::StrCat("oc_while_host_graph_", n->name());
TF_RETURN_IF_ERROR(BuildHostGraphForWhileNode(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
n->name(), host_transfer_key, oc_host_graph_name, fld,
cond_host_func_name, body_host_func_name));
host_graphs->push_back(oc_host_graph_name);
return absl::OkStatus();
}
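// Recursively extracts outside compilation from all If, While, and function
// call nodes in `g`, since their associated functions may themselves contain
// outside compilation.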
Status ExtractOutsideCompilationForNodesWithAssociatedFunctions(
Graph* g, const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const std::map<string, int>& host_compute_core, FunctionLibraryRuntime* flr,
FunctionLibraryDefinition* fld, std::vector<string>* host_graphs,
std::vector<string>* shape_inference_graphs,
bool* has_outside_compilation) {
std::vector<Node*> if_nodes, while_nodes, func_call_nodes;
for (Node* n : g->nodes()) {
if (n->IsIfNode()) {
if_nodes.push_back(n);
} else if (n->IsWhileNode()) {
while_nodes.push_back(n);
} else if (IsFunctionCall(*fld, *n)) {
func_call_nodes.push_back(n);
}
}
for (Node* n : func_call_nodes) {
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFuncCallNode(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
host_compute_core, g, n, flr, fld, host_graphs, shape_inference_graphs,
has_outside_compilation));
}
for (Node* n : if_nodes) {
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForIfNode(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
host_compute_core, g, n, flr, fld, host_graphs, shape_inference_graphs,
has_outside_compilation));
}
for (Node* n : while_nodes) {
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForWhileNode(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
host_compute_core, g, n, flr, fld, host_graphs, shape_inference_graphs,
has_outside_compilation));
}
return absl::OkStatus();
}
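// For each Const node marked for outside compilation that also feeds nodes
// outside the cluster, makes an unmarked copy and reroutes the
// non-outside-compilation data edges to the copy, so the marked original is
// consumed only by outside compilation nodes.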
Status CopyOutsideCompilationConstNodes(
Graph* g, const string& outside_compilation_attr_name) {
for (Node* n : g->op_nodes()) {
if (!n->IsConstant() ||
!HasNodeAttr(n->def(), outside_compilation_attr_name)) {
continue;
}
std::vector<const Edge*> out_edges(n->out_edges().begin(),
n->out_edges().end());
bool has_non_oc_output = false;
for (const Edge* e : out_edges) {
if (!e->IsControlEdge() &&
!HasNodeAttr(e->dst()->def(), outside_compilation_attr_name)) {
has_non_oc_output = true;
break;
}
}
if (!has_non_oc_output) {
continue;
}
NodeDef copy_def = n->def();
copy_def.set_name(g->NewName(n->name()));
copy_def.mutable_attr()->erase(outside_compilation_attr_name);
TF_ASSIGN_OR_RETURN(Node * copy_node, g->AddNode(copy_def));
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
g->AddControlEdge(e->src(), copy_node);
}
}
for (const Edge* e : out_edges) {
if (!e->IsControlEdge() &&
!HasNodeAttr(e->dst()->def(), outside_compilation_attr_name)) {
Node* dst = e->dst();
int dst_input = e->dst_input();
g->RemoveEdge(e);
g->AddEdge(copy_node, 0, dst, dst_input);
}
}
}
return absl::OkStatus();
}
}
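// Rewrites one encapsulated outside compilation subgraph: _Arg/_Retval nodes
// become _XlaRecvAtHost/_XlaSendFromHost wired to a key placeholder, cluster
// attributes are stamped on every node, and the call node def is annotated
// with the attributes (shapes or shape inference graph, dtypes, key) needed
// to later build the XlaHostCompute op.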
Status RewriteOutsideCompilationSubgraphFn::operator()(
const std::vector<OutputTensor>& arg_source_tensors,
std::unique_ptr<Graph>* graph, std::vector<int>* input_permutation,
std::vector<int>* output_permutation, NodeDef* node_def) {
string old_name = node_def->op();
string new_name =
absl::StrCat(xla_cluster_name_, "_", new_function_name_, "_", old_name);
node_def->set_op(new_name);
node_def->set_name(new_name);
FixupSourceAndSinkEdges(graph->get());
TF_ASSIGN_OR_RETURN(
Node * key_placeholder,
AddHostComputeKeyPlaceholder(xla_cluster_name_, graph->get()));
std::vector<DataType> recv_at_host_dtypes;
TF_ASSIGN_OR_RETURN(
Node * recv_at_host_node,
ReplaceArgNodesWithRecvAtHostNode(graph->get(), new_name,
&recv_at_host_dtypes, key_placeholder));
std::vector<DataType> send_from_host_dtypes;
TF_ASSIGN_OR_RETURN(
Node * send_from_host_node,
ReplaceRetNodesWithSendFromHostNode(
graph->get(), new_name, &send_from_host_dtypes, key_placeholder));
for (Node* n : (*graph)->nodes()) {
if (IsKeyPlaceholderNode(*n)) {
continue;
}
n->AddAttr(xla_cluster_attr_name_, xla_cluster_name_);
n->AddAttr(outside_compilation_attr_name_, old_name);
}
std::optional<std::vector<PartialTensorShape>> shapes =
GetInferredInputShapes(send_from_host_dtypes.size(), send_from_host_node);
for (Node* n : (*graph)->nodes()) {
n->ClearAttr(kXlaInferredShapesAttrName);
}
for (Node* n : (*graph)->nodes()) {
if (HasNodeAttr(n->def(), kXlaConnectedToXlaComputationAttrName)) {
(*graph)->AddControlEdge(n, send_from_host_node);
n->ClearAttr(kXlaConnectedToXlaComputationAttrName);
}
if (HasNodeAttr(n->def(), kXlaConnectedFromXlaComputationAttrName)) {
(*graph)->AddControlEdge(recv_at_host_node, n);
n->ClearAttr(kXlaConnectedFromXlaComputationAttrName);
}
}
if (send_from_host_node->in_edges().size() > 1) {
(*graph)->AddControlEdge(send_from_host_node, (*graph)->sink_node());
}
PruneForReverseReachability(
graph->get(), std::unordered_set<const Node*>{(*graph)->sink_node()});
AddNodeAttr("_outside_compilation_subgraph", old_name, node_def);
if (shapes) {
NameAttrList shape_inference_graph;
AddNodeAttr("shape_inference_graph", shape_inference_graph, node_def);
AddNodeAttr("shapes", *shapes, node_def);
} else {
string shape_inference_func_name =
absl::StrCat("_outside_compilation_shape_inference_", new_name);
NameAttrList shape_inference_graph;
shape_inference_graph.set_name(shape_inference_func_name);
AddNodeAttr("shape_inference_graph", shape_inference_graph, node_def);
AddNodeAttr("shapes", std::vector<TensorShapeProto>{}, node_def);
}
AddNodeAttr("ancestors", std::vector<string>{}, node_def);
AddNodeAttr("Tinputs", recv_at_host_dtypes, node_def);
AddNodeAttr("Toutputs", send_from_host_dtypes, node_def);
AddNodeAttr("key", absl::StrCat("host_compute_channel_", new_name), node_def);
return absl::OkStatus();
}
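// Rewrites function `func_name_attrs` for XLA cluster `xla_cluster_name`:
// outside compilation regions are encapsulated and replaced with
// XlaHostCompute nodes (the result is stored as `new_func_name`), nested
// If/While/call functions are processed recursively, and the corresponding
// host-side graph is stored as `host_graph_func_name`.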
Status ExtractOutsideCompilationForFunction(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name, const string& xla_cluster_name,
const NameAttrList& func_name_attrs, const string& new_func_name,
const string& host_graph_func_name,
const std::map<string, int>& host_compute_core, FunctionLibraryRuntime* flr,
FunctionLibraryDefinition* fld, std::vector<string>* shape_inference_graphs,
bool* has_outside_compilation) {
const string& func_name = func_name_attrs.name();
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(
flr->Instantiate(func_name, AttrSlice(&func_name_attrs.attr()), &handle));
Status ret_status = absl::OkStatus();
auto cleanup_handle = gtl::MakeCleanup([&]() {
auto s = flr->ReleaseHandle(handle);
if (!s.ok()) {
ret_status.Update(s);
}
});
const FunctionBody* fbody = flr->GetFunctionBody(handle);
*has_outside_compilation = false;
for (Node* n : fbody->graph->nodes()) {
if (HasNodeAttr(n->def(), outside_compilation_attr_name)) {
*has_outside_compilation = true;
break;
}
}
if (VLOG_IS_ON(4)) {
DumpGraphToFile(
absl::StrCat("extract_outside_compilation_for_func_before_", func_name),
*fbody->graph, fld);
}
std::unique_ptr<Graph> graph_out;
std::vector<string> outside_compilation_host_graphs;
std::vector<string> shape_inference_graphs_to_rewrite;
if (*has_outside_compilation) {
TF_RETURN_IF_ERROR(CopyOutsideCompilationConstNodes(
fbody->graph, outside_compilation_attr_name));
TF_ASSIGN_OR_RETURN(auto cluster_deps,
OutsideCompilationClusterDependencies(
fbody->graph, outside_compilation_attr_name));
TF_RETURN_IF_ERROR(PreprocessEdgesBetweenOutsideCompilations(
fbody->graph, outside_compilation_attr_name));
auto rewrite_fn = std::make_unique<RewriteOutsideCompilationSubgraphFn>(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
new_func_name);
TF_RETURN_IF_ERROR(EncapsulateSubgraphsInFunctions(
outside_compilation_attr_name, *fbody->graph, *rewrite_fn,
true, &graph_out, fld));
std::vector<Node*> outside_compilation_nodes;
for (Node* n : graph_out->nodes()) {
if (HasNodeAttr(n->def(), "_outside_compilation_subgraph")) {
outside_compilation_nodes.push_back(n);
outside_compilation_host_graphs.push_back(n->name());
auto shape_inference_graph = std::make_unique<NameAttrList>();
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "shape_inference_graph",
shape_inference_graph.get()));
if (!shape_inference_graph->name().empty()) {
shape_inference_graphs->push_back(shape_inference_graph->name());
shape_inference_graphs_to_rewrite.push_back(
shape_inference_graph->name());
const FunctionDef* xla_fdef = fld->Find(n->name());
if (!xla_fdef) {
return errors::Internal("Cannot find XLA function ", n->name());
}
auto shape_inference_fdef = std::make_unique<FunctionDef>(*xla_fdef);
shape_inference_fdef->mutable_signature()->set_name(
shape_inference_graph->name());
if (fld->Find(shape_inference_graph->name())) {
TF_RETURN_IF_ERROR(fld->ReplaceFunction(
shape_inference_graph->name(), *shape_inference_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(*shape_inference_fdef));
}
}
}
}
std::map<string, Node*> host_compute_nodes;
for (Node* n : outside_compilation_nodes) {
auto host_compute_node_or = ReplaceOutsideCompilationCallNode(
graph_out.get(), n, host_compute_core, *cluster_deps);
TF_RETURN_IF_ERROR(host_compute_node_or.status());
Node* host_compute_node = host_compute_node_or.value();
host_compute_nodes[host_compute_node->name()] = host_compute_node;
}
for (const auto& iter : host_compute_nodes) {
Node* host_compute_node = iter.second;
std::vector<string> token_input_node_names;
TF_RETURN_IF_ERROR(GetNodeAttr(host_compute_node->def(),
kXlaTokenInputNodesAttrName,
&token_input_node_names));
for (const string& node_name : token_input_node_names) {
if (node_name == kXlaTokenArgNodeName) {
continue;
}
auto iter = host_compute_nodes.find(node_name);
TF_RET_CHECK(iter != host_compute_nodes.end());
graph_out->AddControlEdge(iter->second, host_compute_node);
}
}
}
Graph* g = (*has_outside_compilation) ? graph_out.get() : fbody->graph;
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForNodesWithAssociatedFunctions(
g, xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
host_compute_core, flr, fld, &outside_compilation_host_graphs,
shape_inference_graphs, has_outside_compilation));
if (*has_outside_compilation) {
std::unique_ptr<Graph> host_graph;
TF_RETURN_IF_ERROR(
ConstructHostGraph(xla_cluster_name, outside_compilation_attr_name,
outside_compilation_host_graphs, fld, &host_graph));
auto host_graph_fdef = std::make_unique<FunctionDef>();
TF_RETURN_IF_ERROR(GraphToFunctionDef(*host_graph, host_graph_func_name,
HostGraphControlRetMapping,
host_graph_fdef.get()));
if (fld->Find(host_graph_func_name)) {
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(host_graph_func_name, *host_graph_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(*host_graph_fdef));
}
for (const string& shape_inference_graph :
shape_inference_graphs_to_rewrite) {
TF_RETURN_IF_ERROR(
RewriteShapeInferenceGraph(shape_inference_graph, host_graph.get(),
nullptr, fld));
}
for (const string& func : outside_compilation_host_graphs) {
TF_RETURN_IF_ERROR(fld->RemoveFunction(func));
}
auto updated_fdef = std::make_unique<FunctionDef>();
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*g, new_func_name, updated_fdef.get()));
updated_fdef->mutable_signature()->set_is_stateful(true);
const FunctionDef* original_fdef = fld->Find(func_name);
if (original_fdef) {
for (const auto& attr : original_fdef->attr()) {
(*updated_fdef->mutable_attr())[attr.first] = attr.second;
}
}
if (fld->Find(new_func_name)) {
TF_RETURN_IF_ERROR(fld->ReplaceFunction(new_func_name, *updated_fdef));
} else {
TF_RETURN_IF_ERROR(fld->AddFunctionDef(*updated_fdef));
}
if (VLOG_IS_ON(4)) {
DumpGraphToFile(
absl::StrCat("extract_outside_compilation_for_func_after_",
func_name),
*g, fld);
}
}
return ret_status;
}
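// Top-level entry point: for every XLA cluster, extracts outside compilation
// from its function, expands the resulting host graph into the main graph at
// the cluster's pivot node, and rewrites any shape inference graphs.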
Status ExtractOutsideCompilation(
const string& xla_cluster_attr_name,
const string& outside_compilation_attr_name,
const std::unordered_map<string, XlaClusterInfo>& clusters, Graph* g,
FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld,
bool* modified) {
if (VLOG_IS_ON(4)) {
DumpGraphToFile("extract_outside_compilation_before", *g, fld);
}
*modified = false;
auto node_name_index = g->BuildNodeNameIndex();
for (auto& iter : clusters) {
string xla_cluster_name = iter.first;
Node* n = iter.second.node;
auto const& func_name_attrs = iter.second.func_name_attrs;
auto const& host_compute_core = iter.second.host_compute_core;
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
string host_graph_func_name =
absl::StrCat("oc_host_graph_", xla_cluster_name);
TF_RETURN_IF_ERROR(ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
func_name_attrs, func_name_attrs.name(), host_graph_func_name,
host_compute_core, flr, fld, &shape_inference_graphs,
&has_outside_compilation));
*modified |= has_outside_compilation;
if (has_outside_compilation) {
string pivot_name = absl::StrCat(xla_cluster_name, "/pivot");
Node* pivot_node = node_name_index[pivot_name];
TF_RETURN_IF_ERROR(ExpandHostGraphIntoMainGraph(
g, fld, host_graph_func_name, n, pivot_node));
TF_RETURN_IF_ERROR(fld->RemoveFunction(host_graph_func_name));
for (const auto& shape_inference_graph_name : shape_inference_graphs) {
TF_RETURN_IF_ERROR(RewriteShapeInferenceGraph(
shape_inference_graph_name, g, pivot_node, fld));
}
}
}
if (VLOG_IS_ON(4)) {
DumpGraphToFile("extract_outside_compilation_after", *g, fld);
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/extract_outside_compilation_pass.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/encapsulate_util.h"
#include "xla/test.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
TEST(RewriteOutsideCompilationSubgraphFnTest, Basic) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg0 = ops::_Arg(s.WithOpName("arg0"), DT_INT32, 0);
Output arg1 = ops::_Arg(s.WithOpName("arg1"), DT_FLOAT, 1);
Output arg2 = ops::_Arg(s.WithOpName("arg2"), DT_INT32, 2);
Output add = ops::Add(s.WithOpName("add"), arg0, arg0);
auto ret0 = ops::_Retval(s.WithOpName("ret0"), add, 0);
auto ret1 = ops::_Retval(s.WithOpName("ret1"), arg1, 1);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
Node *add_node = node_name_image["add"];
EXPECT_NE(add_node, nullptr);
add_node->AddAttr(kXlaConnectedToXlaComputationAttrName, "cluster");
add_node->AddAttr(kXlaConnectedFromXlaComputationAttrName, "cluster");
RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", "");
std::vector<OutputTensor> arg_source_tensors;
NodeDef call_node_def;
call_node_def.set_op("0");
TF_CHECK_OK(
rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def));
node_name_image = g->BuildNodeNameIndex();
Node *key_placeholder = node_name_image["cluster_key_placeholder"];
EXPECT_NE(key_placeholder, nullptr);
for (Node *n : g->nodes()) {
EXPECT_NE(n->type_string(), "_Arg");
}
Node *recv_at_host = node_name_image["outside_compilation_cluster__0_recv"];
EXPECT_NE(recv_at_host, nullptr);
std::vector<DataType> recv_at_host_dtypes;
TF_CHECK_OK(
GetNodeAttr(recv_at_host->attrs(), "Toutputs", &recv_at_host_dtypes));
EXPECT_EQ(recv_at_host_dtypes.size(), 3);
EXPECT_EQ(recv_at_host_dtypes[0], DT_INT32);
EXPECT_EQ(recv_at_host_dtypes[1], DT_FLOAT);
EXPECT_EQ(recv_at_host_dtypes[2], DT_INT32);
for (Node *n : g->nodes()) {
EXPECT_NE(n->type_string(), "_Retval");
}
Node *send_from_host = node_name_image["outside_compilation_cluster__0_send"];
EXPECT_NE(send_from_host, nullptr);
std::vector<DataType> send_from_host_dtypes;
TF_CHECK_OK(
GetNodeAttr(send_from_host->attrs(), "Tinputs", &send_from_host_dtypes));
EXPECT_EQ(send_from_host_dtypes.size(), 2);
EXPECT_EQ(send_from_host_dtypes[0], DT_INT32);
EXPECT_EQ(send_from_host_dtypes[1], DT_FLOAT);
add_node = node_name_image["add"];
EXPECT_NE(add_node, nullptr);
EXPECT_TRUE(HasNodeAttr(add_node->def(), "_xla"));
EXPECT_TRUE(HasNodeAttr(add_node->def(), "_oc"));
bool has_control_edge_from_recv_at_host = false;
for (auto e : add_node->in_edges()) {
if (e->IsControlEdge() && e->src() == recv_at_host) {
has_control_edge_from_recv_at_host = true;
}
}
EXPECT_TRUE(has_control_edge_from_recv_at_host);
bool has_control_edge_to_send_from_host = false;
for (auto e : add_node->out_edges()) {
if (e->IsControlEdge() && e->dst() == send_from_host) {
has_control_edge_to_send_from_host = true;
}
}
EXPECT_TRUE(has_control_edge_to_send_from_host);
NameAttrList shape_inference_graph;
TF_CHECK_OK(GetNodeAttr(AttrSlice(&call_node_def.attr()),
"shape_inference_graph", &shape_inference_graph));
EXPECT_EQ(shape_inference_graph.name(),
"_outside_compilation_shape_inference_cluster__0");
}
TEST(RewriteOutsideCompilationSubgraphFnTest, NoSendFromHost) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg0 = ops::_Arg(s.WithOpName("arg0"), DT_INT32, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", "");
std::vector<OutputTensor> arg_source_tensors;
NodeDef call_node_def;
call_node_def.set_op("0");
TF_CHECK_OK(
rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def));
auto node_name_image = g->BuildNodeNameIndex();
Node *key_placeholder = node_name_image["cluster_key_placeholder"];
EXPECT_NE(key_placeholder, nullptr);
Node *recv_at_host = node_name_image["outside_compilation_cluster__0_recv"];
EXPECT_NE(recv_at_host, nullptr);
Node *send_from_host = node_name_image["outside_compilation_cluster__0_send"];
EXPECT_EQ(send_from_host, nullptr);
}
TEST(RewriteOutsideCompilationSubgraphFnTest, NoRecvAtHost) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
auto ret = ops::_Retval(s.WithOpName("ret"), const0, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", "");
std::vector<OutputTensor> arg_source_tensors;
NodeDef call_node_def;
call_node_def.set_op("0");
TF_CHECK_OK(
rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def));
auto node_name_image = g->BuildNodeNameIndex();
Node *key_placeholder = node_name_image["cluster_key_placeholder"];
EXPECT_NE(key_placeholder, nullptr);
Node *recv_at_host = node_name_image["outside_compilation_cluster__0_recv"];
EXPECT_EQ(recv_at_host, nullptr);
Node *send_from_host = node_name_image["outside_compilation_cluster__0_send"];
EXPECT_NE(send_from_host, nullptr);
}
TEST(RewriteOutsideCompilationSubgraphFnTest, NoKeyPlaceholder) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", "");
std::vector<OutputTensor> arg_source_tensors;
NodeDef call_node_def;
call_node_def.set_op("0");
TF_CHECK_OK(
rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def));
auto node_name_image = g->BuildNodeNameIndex();
Node *key_placeholder = node_name_image["cluster_key_placeholder"];
EXPECT_EQ(key_placeholder, nullptr);
Node *recv_at_host = node_name_image["outside_compilation_cluster__0_recv"];
EXPECT_EQ(recv_at_host, nullptr);
Node *send_from_host = node_name_image["outside_compilation_cluster__0_send"];
EXPECT_EQ(send_from_host, nullptr);
}
TEST(RewriteOutsideCompilationSubgraphFnTest, ShapesInferred) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
auto ret = ops::_Retval(s.WithOpName("ret"), const0, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
Node *const0_node = node_name_image["const0"];
EXPECT_NE(const0_node, nullptr);
PartialTensorShape shape({2});
const0_node->AddAttr(kXlaInferredShapesAttrName,
std::vector<PartialTensorShape>{shape});
RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", "");
std::vector<OutputTensor> arg_source_tensors;
NodeDef call_node_def;
call_node_def.set_op("0");
TF_CHECK_OK(
rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def));
node_name_image = g->BuildNodeNameIndex();
std::vector<TensorShapeProto> shapes;
TF_CHECK_OK(GetNodeAttr(AttrSlice(&call_node_def.attr()), "shapes", &shapes));
EXPECT_EQ(shapes.size(), 1);
EXPECT_EQ(shapes[0].dim_size(), 1);
}
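// Test fixture that registers local devices and builds a
// ProcessFunctionLibraryRuntime so ExtractOutsideCompilationForFunction can
// be exercised end to end.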
class ExtractOutsideCompilationForFunctionTest : public ::testing::Test {
public:
void SetUp() override {
SessionOptions session_options;
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
}
Status ExtractOutsideCompilationTest(
const string &xla_cluster_attr_name,
const string &outside_compilation_attr_name,
const string &xla_cluster_name, const NameAttrList &func_name_attrs,
const string &new_func_name, const string &host_graph_func_name,
const std::map<string, int> &host_compute_core,
FunctionLibraryDefinition *fld,
std::vector<string> *shape_inference_graphs,
bool *has_outside_compilation) {
OptimizerOptions opts;
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, fld, opts,
nullptr);
auto flr = pflr_->GetFLR("/job:localhost/replica:0/task:0/cpu:0");
return ExtractOutsideCompilationForFunction(
xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name,
func_name_attrs, new_func_name, host_graph_func_name, host_compute_core,
flr, fld, shape_inference_graphs, has_outside_compilation);
}
private:
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
};
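// Checks that two outside compilation clusters are rewritten into
// XlaHostCompute nodes with the requested cores and inferred shapes, and that
// the generated host graph contains matching _XlaRecvAtHost/_XlaSendFromHost
// nodes wired to the key placeholder and the sequencer.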
TEST_F(ExtractOutsideCompilationForFunctionTest, Basic) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
Output identity0 = ops::Identity(s.WithOpName("identity0"), const0);
Output identity1 = ops::Identity(s.WithOpName("identity1"), identity0);
Output identity2 = ops::Identity(s.WithOpName("identity2"), identity1);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity0"]->AddAttr("_oc", "0");
node_name_image["identity1"]->AddAttr("_oc", "1");
PartialTensorShape shape({2});
node_name_image["identity1"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core = {{"0", 1}, {"1", 0}};
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
std::unique_ptr<FunctionBody> xla_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("cluster_rewritten"),
AttrSlice(), &fld, &xla_fbody));
auto node_name_index = xla_fbody->graph->BuildNodeNameIndex();
Node *host_compute_0 = node_name_index["outside_compilation_0_host_compute"];
EXPECT_NE(host_compute_0, nullptr);
Node *host_compute_1 = node_name_index["outside_compilation_1_host_compute"];
EXPECT_NE(host_compute_1, nullptr);
int tpu_core;
TF_CHECK_OK(GetNodeAttr(host_compute_0->attrs(), "tpu_core", &tpu_core));
EXPECT_EQ(tpu_core, 1);
TF_CHECK_OK(GetNodeAttr(host_compute_1->attrs(), "tpu_core", &tpu_core));
EXPECT_EQ(tpu_core, 0);
std::vector<TensorShapeProto> shapes;
TF_CHECK_OK(GetNodeAttr(host_compute_0->attrs(), "shapes", &shapes));
EXPECT_EQ(shapes.size(), 0);
TF_CHECK_OK(GetNodeAttr(host_compute_1->attrs(), "shapes", &shapes));
EXPECT_EQ(shapes.size(), 1);
EXPECT_EQ(shapes[0].dim_size(), 1);
NameAttrList shape_inference_graph;
TF_CHECK_OK(GetNodeAttr(host_compute_0->attrs(), "shape_inference_graph",
&shape_inference_graph));
EXPECT_EQ(shape_inference_graph.name(), "");
TF_CHECK_OK(GetNodeAttr(host_compute_1->attrs(), "shape_inference_graph",
&shape_inference_graph));
EXPECT_EQ(shape_inference_graph.name(), "");
EXPECT_EQ(shape_inference_graphs.size(), 0);
std::unique_ptr<FunctionBody> host_fbody;
AttrValue device_ordinal_temp_value;
device_ordinal_temp_value.set_i(0);
protobuf::Map<string, AttrValue> host_func_attrs;
host_func_attrs["_device_ordinal"] = device_ordinal_temp_value;
TF_CHECK_OK(FunctionDefToBodyHelper(
*fld.Find("host_graph"), AttrSlice(&host_func_attrs), &fld, &host_fbody));
Graph *host_graph = host_fbody->graph;
Node *key_placeholder = nullptr, *sequencer = nullptr;
for (Node *n : host_graph->nodes()) {
if (n->type_string() == "Placeholder" &&
absl::EndsWith(n->name(), "_key_placeholder")) {
EXPECT_EQ(key_placeholder, nullptr);
key_placeholder = n;
} else if (HasNodeAttr(n->def(), "_xla_host_transfer_sequencer")) {
EXPECT_EQ(sequencer, nullptr);
sequencer = n;
}
}
EXPECT_NE(key_placeholder, nullptr);
EXPECT_NE(sequencer, nullptr);
int num_send_from_host = 0, num_recv_at_host = 0;
std::vector<Node *> send_recv_nodes;
for (Node *n : host_graph->nodes()) {
if (n->type_string() == "_XlaSendFromHost") {
num_send_from_host++;
send_recv_nodes.push_back(n);
} else if (n->type_string() == "_XlaRecvAtHost") {
num_recv_at_host++;
send_recv_nodes.push_back(n);
}
}
EXPECT_EQ(num_send_from_host, 1);
EXPECT_EQ(num_recv_at_host, 1);
for (Node *n : send_recv_nodes) {
Node *input_node;
TF_CHECK_OK(n->input_node(n->num_inputs() - 1, &input_node));
EXPECT_EQ(input_node, key_placeholder);
bool has_control_edge_to_sequencer = false;
for (const Edge *e : n->out_edges()) {
if (e->IsControlEdge() && e->dst() == sequencer) {
has_control_edge_to_sequencer = true;
break;
}
}
EXPECT_TRUE(has_control_edge_to_sequencer);
}
}
TEST_F(ExtractOutsideCompilationForFunctionTest, NoHostGraph) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core = {{"0", 1}, {"1", 0}};
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
EXPECT_EQ(fld.Find("host_graph"), nullptr);
}
TEST_F(ExtractOutsideCompilationForFunctionTest, OutsideCompilationInIf) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(s.WithOpName("arg"), DT_INT32, 0);
Output identity = ops::Identity(s.WithOpName("identity_true_fn"), arg);
ops::_Retval retval(s.WithOpName("retval"), identity, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity_true_fn"]->AddAttr("_oc", "0");
PartialTensorShape shape({2});
node_name_image["identity_true_fn"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *true_fn_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "true_fn", true_fn_fdef));
}
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(s.WithOpName("arg"), DT_INT32, 0);
Output identity = ops::Identity(s.WithOpName("identity_false_fn"), arg);
ops::_Retval retval(s.WithOpName("retval"), identity, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity_false_fn"]->AddAttr("_oc", "0");
PartialTensorShape shape({2});
node_name_image["identity_false_fn"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *false_fn_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "false_fn", false_fn_fdef));
}
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output cond = ops::Const(s.WithOpName("const0"), true, {2});
Output input = ops::Const(s.WithOpName("const1"), 1, {2});
NameAttrList true_fn;
true_fn.set_name("true_fn");
NameAttrList false_fn;
false_fn.set_name("false_fn");
auto if_op = ops::If(s.WithOpName("if"), cond,
std::initializer_list<Input>{cond, input}, {DT_INT32},
true_fn, false_fn);
ops::_Retval retval(s.WithOpName("retval"), if_op.output[0], 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core;
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
{
std::unique_ptr<FunctionBody> host_fbody;
AttrValue device_ordinal_temp_value;
device_ordinal_temp_value.set_i(0);
protobuf::Map<string, AttrValue> host_func_attrs;
host_func_attrs["_device_ordinal"] = device_ordinal_temp_value;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("host_graph"),
AttrSlice(&host_func_attrs), &fld,
&host_fbody));
Graph *host_graph = host_fbody->graph;
auto node_name_index = host_graph->BuildNodeNameIndex();
Node *recv_if_pred_node = node_name_index["recv_oc_if_pred_if"];
EXPECT_NE(recv_if_pred_node, nullptr);
Node *if_oc_node = node_name_index["oc_if_if"];
EXPECT_NE(if_oc_node, nullptr);
Node *if_oc_node_cond_input;
TF_CHECK_OK(if_oc_node->input_node(0, &if_oc_node_cond_input));
EXPECT_EQ(if_oc_node_cond_input, recv_if_pred_node);
const FunctionDef *true_def = fld.Find("oc_then_branch_host_if_true_fn");
EXPECT_NE(true_def, nullptr);
bool has_identity_true_fn_node = false;
for (const auto &node_def : true_def->node_def()) {
if (node_def.name() == "identity_true_fn") {
has_identity_true_fn_node = true;
break;
}
}
EXPECT_TRUE(has_identity_true_fn_node);
const FunctionDef *false_def = fld.Find("oc_else_branch_host_if_false_fn");
EXPECT_NE(false_def, nullptr);
bool has_identity_false_fn_node = false;
for (const auto &node_def : false_def->node_def()) {
if (node_def.name() == "identity_false_fn") {
has_identity_false_fn_node = true;
break;
}
}
EXPECT_TRUE(has_identity_false_fn_node);
}
{
std::unique_ptr<FunctionBody> xla_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("cluster_rewritten"),
AttrSlice(), &fld, &xla_fbody));
Graph *xla_graph = xla_fbody->graph;
auto node_name_index = xla_graph->BuildNodeNameIndex();
Node *send_if_pred_node = node_name_index["send_oc_if_pred_if"];
EXPECT_NE(send_if_pred_node, nullptr);
bool has_control_edge_to_if = false;
for (const Edge *e : send_if_pred_node->out_edges()) {
if (e->IsControlEdge() && e->dst()->name() == "if") {
has_control_edge_to_if = true;
break;
}
}
EXPECT_TRUE(has_control_edge_to_if);
Node *if_node = node_name_index["if"];
EXPECT_NE(if_node, nullptr);
std::vector<string> token_inputs;
TF_CHECK_OK(
GetNodeAttr(if_node->def(), "_xla_token_input_nodes", &token_inputs));
EXPECT_THAT(token_inputs, ::testing::ElementsAre("send_oc_if_pred_if"));
}
}
TEST_F(ExtractOutsideCompilationForFunctionTest, OutsideCompilationInWhile) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(s.WithOpName("arg"), DT_BOOL, 0);
Output identity = ops::Identity(s.WithOpName("identity_cond_fn"), arg);
ops::_Retval retval(s.WithOpName("retval"), identity, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity_cond_fn"]->AddAttr("_oc", "0");
PartialTensorShape shape({2});
node_name_image["identity_cond_fn"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *cond_fn_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cond_fn", cond_fn_fdef));
}
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(s.WithOpName("arg"), DT_BOOL, 0);
Output identity = ops::Identity(s.WithOpName("identity_body_fn"), arg);
ops::_Retval retval(s.WithOpName("retval"), identity, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity_body_fn"]->AddAttr("_oc", "0");
PartialTensorShape shape({2});
node_name_image["identity_body_fn"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *body_fn_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "body_fn", body_fn_fdef));
}
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("const0"), true, {2});
NameAttrList cond_fn;
cond_fn.set_name("cond_fn");
NameAttrList body_fn;
body_fn.set_name("body_fn");
auto while_op =
ops::While(s.WithOpName("while"), std::initializer_list<Input>{input},
cond_fn, body_fn);
ops::_Retval retval(s.WithOpName("retval"), while_op.output[0], 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core;
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
{
std::unique_ptr<FunctionBody> host_fbody;
AttrValue device_ordinal_temp_value;
device_ordinal_temp_value.set_i(0);
protobuf::Map<string, AttrValue> host_func_attrs;
host_func_attrs["_device_ordinal"] = device_ordinal_temp_value;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("host_graph"),
AttrSlice(&host_func_attrs), &fld,
&host_fbody));
Graph *host_graph = host_fbody->graph;
auto node_name_index = host_graph->BuildNodeNameIndex();
Node *while_oc_node = node_name_index["oc_while_while"];
EXPECT_NE(while_oc_node, nullptr);
const FunctionDef *cond_def = fld.Find("oc_cond_host_while_cond_fn");
EXPECT_NE(cond_def, nullptr);
bool has_identity_cond_fn_node = false;
for (const auto &node_def : cond_def->node_def()) {
if (node_def.name() == "identity_cond_fn") {
has_identity_cond_fn_node = true;
break;
}
}
EXPECT_TRUE(has_identity_cond_fn_node);
const FunctionDef *body_def = fld.Find("oc_body_host_while_body_fn");
EXPECT_NE(body_def, nullptr);
bool has_identity_body_fn_node = false;
for (const auto &node_def : body_def->node_def()) {
if (node_def.name() == "identity_body_fn") {
has_identity_body_fn_node = true;
break;
}
}
EXPECT_TRUE(has_identity_body_fn_node);
}
{
const FunctionDef *cond_def = fld.Find("cond_fn_oc");
EXPECT_NE(cond_def, nullptr);
bool has_send_oc_while_cond_node = false;
for (const auto &node_def : cond_def->node_def()) {
if (node_def.name() == "send_oc_while_cond_while") {
has_send_oc_while_cond_node = true;
break;
}
}
EXPECT_TRUE(has_send_oc_while_cond_node);
}
}
TEST_F(ExtractOutsideCompilationForFunctionTest, OutsideCompilationInFunction) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(s.WithOpName("arg"), DT_INT32, 0);
Output identity = ops::Identity(s.WithOpName("identity"), arg);
ops::_Retval retval(s.WithOpName("retval"), identity, 0);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity"]->AddAttr("_oc", "0");
PartialTensorShape shape({2});
node_name_image["identity"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *true_fn_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "fn", true_fn_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
{
std::unique_ptr<Graph> g(new Graph(&fld));
tensorflow::TensorProto tensor_proto;
tensor_proto.set_dtype(tensorflow::DT_INT32);
tensorflow::TensorShapeProto shape;
shape.add_dim()->set_size(2);
*tensor_proto.mutable_tensor_shape() = shape;
for (int i = 0; i < 2; ++i) {
tensor_proto.add_int_val(1);
}
NodeDef const_def;
TF_CHECK_OK(NodeDefBuilder("const", "Const")
.Attr("dtype", DT_INT32)
.Attr("value", tensor_proto)
.Finalize(&const_def));
Status s;
Node *const_node = g->AddNode(const_def, &s);
TF_CHECK_OK(s);
NodeDef fn_def;
TF_CHECK_OK(NodeDefBuilder("fn", "fn", &fld)
.Input("const", 0, DT_INT32)
.Finalize(&fn_def));
Node *fn_node = g->AddNode(fn_def, &s);
TF_CHECK_OK(s);
g->AddEdge(const_node, 0, fn_node, 0);
NodeDef ret_def;
TF_CHECK_OK(NodeDefBuilder("ret", "_Retval")
.Attr("index", 0)
.Attr("T", DT_INT32)
.Input("fn", 0, DT_INT32)
.Finalize(&ret_def));
Node *ret_node = g->AddNode(ret_def, &s);
TF_CHECK_OK(s);
g->AddEdge(fn_node, 0, ret_node, 0);
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
TF_CHECK_OK(fld.AddFunctionDef(*xla_fdef));
}
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core;
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
{
std::unique_ptr<FunctionBody> host_fbody;
AttrValue device_ordinal_temp_value;
device_ordinal_temp_value.set_i(0);
protobuf::Map<string, AttrValue> host_func_attrs;
host_func_attrs["_device_ordinal"] = device_ordinal_temp_value;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("host_graph"),
AttrSlice(&host_func_attrs), &fld,
&host_fbody));
Graph *host_graph = host_fbody->graph;
auto node_name_index = host_graph->BuildNodeNameIndex();
Node *call_node = node_name_index["oc_call_fn"];
EXPECT_NE(call_node, nullptr);
std::unique_ptr<FunctionBody> call_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("oc_func_call_host_fn"),
AttrSlice(&host_func_attrs), &fld,
&call_fbody));
bool has_recv = false, has_send = false;
for (Node *n : call_fbody->graph->nodes()) {
if (n->type_string() == "_XlaRecvAtHost") {
has_recv = true;
} else if (n->type_string() == "_XlaSendFromHost") {
has_send = true;
}
}
EXPECT_TRUE(has_recv);
EXPECT_TRUE(has_send);
}
{
std::unique_ptr<FunctionBody> xla_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("cluster_rewritten"),
AttrSlice(), &fld, &xla_fbody));
Graph *xla_graph = xla_fbody->graph;
auto node_name_index = xla_graph->BuildNodeNameIndex();
Node *fn_node = node_name_index["fn"];
EXPECT_NE(fn_node, nullptr);
EXPECT_EQ(fn_node->type_string(), "fn_oc");
std::unique_ptr<FunctionBody> call_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("fn_oc"), AttrSlice(), &fld,
&call_fbody));
bool has_hc = false;
for (Node *n : call_fbody->graph->nodes()) {
if (n->type_string() == "XlaHostCompute") {
has_hc = true;
}
}
EXPECT_TRUE(has_hc);
}
}
TEST_F(ExtractOutsideCompilationForFunctionTest,
OutsideCompilationClusterDataDependency) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
Output identity0 = ops::Identity(s.WithOpName("identity0"), const0);
Output identity1 = ops::Identity(s.WithOpName("identity1"), identity0);
Output identity2 = ops::Identity(s.WithOpName("identity2"), identity1);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
std::cout << "Graph is " << (*g).ToGraphDefDebug().DebugString()
<< std::endl;
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity0"]->AddAttr("_oc", "0");
node_name_image["identity1"]->AddAttr("_oc", "1");
PartialTensorShape shape({2});
node_name_image["identity1"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core = {{"0", 1}, {"1", 0}};
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
std::unique_ptr<FunctionBody> xla_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("cluster_rewritten"),
AttrSlice(), &fld, &xla_fbody));
auto node_name_index = xla_fbody->graph->BuildNodeNameIndex();
Node *host_compute_0 = node_name_index["outside_compilation_0_host_compute"];
EXPECT_NE(host_compute_0, nullptr);
Node *host_compute_1 = node_name_index["outside_compilation_1_host_compute"];
EXPECT_NE(host_compute_1, nullptr);
std::vector<string> token_input_nodes;
TF_CHECK_OK(GetNodeAttr(AttrSlice(host_compute_0->attrs()),
"_xla_token_input_nodes", &token_input_nodes));
std::vector<string> expected_token_input_nodes_0({"_xla_token_arg_node"});
EXPECT_EQ(token_input_nodes, expected_token_input_nodes_0);
token_input_nodes.clear();
std::vector<string> expected_token_input_nodes_1(
{"_xla_token_arg_node", "outside_compilation_0_host_compute"});
TF_CHECK_OK(GetNodeAttr(AttrSlice(host_compute_1->attrs()),
"_xla_token_input_nodes", &token_input_nodes));
EXPECT_EQ(token_input_nodes, expected_token_input_nodes_1);
bool has_control_edge = false;
for (const Edge *e : host_compute_1->in_edges()) {
if (e->IsControlEdge() && e->src() == host_compute_0) {
has_control_edge = true;
break;
}
}
EXPECT_TRUE(has_control_edge);
}
TEST_F(ExtractOutsideCompilationForFunctionTest,
OutsideCompilationClusterControlDependency) {
FunctionDefLibrary fdl;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const0 = ops::Const(s.WithOpName("const0"), 1, {2});
Output identity0 = ops::Identity(s.WithOpName("identity0"), const0);
Output identity1 = ops::Identity(
s.WithOpName("identity1").WithControlDependencies(identity0), const0);
Output identity2 = ops::Identity(s.WithOpName("identity2"), identity1);
std::unique_ptr<Graph> g(new Graph(OpRegistry::Global()));
TF_CHECK_OK(s.ToGraph(g.get()));
std::cout << "Graph is " << (*g).ToGraphDefDebug().DebugString()
<< std::endl;
auto node_name_image = g->BuildNodeNameIndex();
node_name_image["identity0"]->AddAttr("_oc", "0");
node_name_image["identity1"]->AddAttr("_oc", "1");
PartialTensorShape shape({2});
node_name_image["identity1"]->AddAttr(
kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape});
FunctionDef *xla_fdef = fdl.add_function();
TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef));
}
FunctionLibraryDefinition fld(OpRegistry::Global(), fdl);
protobuf::Map<string, tensorflow::AttrValue> attrs;
std::map<string, int> host_compute_core = {{"0", 1}, {"1", 0}};
std::vector<string> shape_inference_graphs;
bool has_outside_compilation;
NameAttrList name_attrs;
name_attrs.set_name("cluster");
*name_attrs.mutable_attr() = attrs;
TF_CHECK_OK(ExtractOutsideCompilationTest(
"_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph",
host_compute_core, &fld, &shape_inference_graphs,
&has_outside_compilation));
std::unique_ptr<FunctionBody> xla_fbody;
TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("cluster_rewritten"),
AttrSlice(), &fld, &xla_fbody));
auto node_name_index = xla_fbody->graph->BuildNodeNameIndex();
Node *host_compute_0 = node_name_index["outside_compilation_0_host_compute"];
EXPECT_NE(host_compute_0, nullptr);
Node *host_compute_1 = node_name_index["outside_compilation_1_host_compute"];
EXPECT_NE(host_compute_1, nullptr);
std::vector<string> token_input_nodes;
TF_CHECK_OK(GetNodeAttr(AttrSlice(host_compute_0->attrs()),
"_xla_token_input_nodes", &token_input_nodes));
std::vector<string> expected_token_input_nodes_0({"_xla_token_arg_node"});
EXPECT_EQ(token_input_nodes, expected_token_input_nodes_0);
token_input_nodes.clear();
std::vector<string> expected_token_input_nodes_1(
{"_xla_token_arg_node", "outside_compilation_0_host_compute"});
TF_CHECK_OK(GetNodeAttr(AttrSlice(host_compute_1->attrs()),
"_xla_token_input_nodes", &token_input_nodes));
EXPECT_EQ(token_input_nodes, expected_token_input_nodes_1);
bool has_control_edge = false;
for (const Edge *e : host_compute_1->in_edges()) {
if (e->IsControlEdge() && e->src() == host_compute_0) {
has_control_edge = true;
break;
}
}
EXPECT_TRUE(has_control_edge);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/extract_outside_compilation_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/extract_outside_compilation_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2cef0ad9-29f3-4fb7-a084-6fed5363bd83 | cpp | tensorflow/tensorflow | pjrt_compile_util | tensorflow/compiler/jit/pjrt_compile_util.cc | tensorflow/compiler/jit/pjrt_compile_util_test.cc | #include "tensorflow/compiler/jit/pjrt_compile_util.h"
#include <vector>
#include "tensorflow/compiler/jit/device_compilation_profiler.h"
#include "tensorflow/compiler/jit/device_compiler.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/jit/xla_compiler_options_util.h"
#include "tensorflow/compiler/jit/xla_platform_info.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
using PjRtDeviceCompiler =
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
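// Compiles `function` into a PjRtLoadedExecutable, using the PjRt
// DeviceCompiler and DeviceCompilationProfiler looked up (or created) in `rm`,
// and returns the compilation result, PjRt client and executable through the
// output parameters.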
Status CompileToPjRtLoadedExecutable(
const DeviceBase* device, const XlaPlatformInfo& platform_info,
const NameAttrList& function,
const std::vector<XlaCompiler::Argument>& args,
DeviceCompileMode compile_mode, bool has_ref_vars,
bool may_alias_resource_update, FunctionLibraryRuntime* flr,
ResourceMgr* rm, const XlaCompiler::CompilationResult** compilation_result,
xla::PjRtClient** client, xla::PjRtLoadedExecutable** executable) {
PjRtDeviceCompiler* pjrt_device_compiler;
DeviceCompilationProfiler* profiler;
TF_RETURN_IF_ERROR(GetOrCreatePjRtDeviceCompilerAndProfiler(
platform_info, rm, flr, &pjrt_device_compiler, &profiler));
core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler);
core::ScopedUnref profiler_ref(profiler);
*client = pjrt_device_compiler->client();
XlaCompiler::Options options = GenerateCompilerOptionsForPjRt(
*flr, device, platform_info, pjrt_device_compiler);
XlaCompiler::CompileOptions compile_options =
GenerateCompileOptions(has_ref_vars, may_alias_resource_update);
return pjrt_device_compiler->CompileIfNeeded(
options, function, args, compile_options, compile_mode, profiler,
compilation_result, executable);
}
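// Convenience overload that obtains the ResourceMgr, device and
// FunctionLibraryRuntime from the OpKernelContext.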
Status CompileToPjRtLoadedExecutable(
const OpKernelContext& ctx, const XlaPlatformInfo& platform_info,
const NameAttrList& function,
const std::vector<XlaCompiler::Argument>& args,
DeviceCompileMode compile_mode, bool has_ref_vars,
bool may_alias_resource_update,
const XlaCompiler::CompilationResult** compilation_result,
xla::PjRtClient** client, xla::PjRtLoadedExecutable** executable) {
TF_ASSIGN_OR_RETURN(ResourceMgr * rm, GetResourceMgrForDeviceCompiler(
ctx, platform_info.device_type()));
return CompileToPjRtLoadedExecutable(
ctx.device(), platform_info, function, args, compile_mode, has_ref_vars,
may_alias_resource_update, ctx.function_library(), rm, compilation_result,
client, executable);
}
} | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/compiler/jit/pjrt_compile_util.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
namespace {
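// Builds a small two-argument graph computing A + B; the tests below compile
// it as the function "foo".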
StatusOr<std::unique_ptr<Graph>> SampleGraphAddXY() {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
auto c = ops::Add(scope.WithOpName("C"), a, b);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
return graph;
}
StatusOr<FunctionDef> SampleFuntionAddXY(const std::string& name) {
TF_ASSIGN_OR_RETURN(auto graph, SampleGraphAddXY());
FunctionDef fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph, name, &fdef));
return fdef;
}
std::vector<XlaCompiler::Argument> SampleArgsForAddXY() {
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
return args;
}
TEST(PjrtCompileUtilTest, CompileToPjRtLoadedExecutable) {
DeviceSetup device_setup;
TF_ASSERT_OK_AND_ASSIGN(auto fdef, SampleFuntionAddXY("foo"));
device_setup.AddDevicesAndSetUp({DEVICE_GPU}, fdef);
Device* device = device_setup.GetDevice(DEVICE_GPU);
const XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
NameAttrList function;
function.set_name("foo");
ResourceMgr resource_mgr("");
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::PjRtLoadedExecutable* pjrt_executable = nullptr;
xla::PjRtClient* pjrt_client = nullptr;
TF_EXPECT_OK(CompileToPjRtLoadedExecutable(
device, platform_info, function, SampleArgsForAddXY(),
      DeviceCompileMode::kStrict, /*has_ref_vars=*/true,
      /*may_alias_resource_update=*/true, device_setup.flr(), &resource_mgr,
&compilation_result, &pjrt_client, &pjrt_executable));
EXPECT_TRUE(compilation_result != nullptr);
EXPECT_TRUE(pjrt_executable != nullptr);
EXPECT_TRUE(pjrt_client != nullptr);
}
TEST(PjrtCompileUtilTest, CompileToPjRtLoadedExecutableWithOpKernelContext) {
DeviceSetup device_setup;
TF_ASSERT_OK_AND_ASSIGN(auto fdef, SampleFuntionAddXY("foo"));
device_setup.AddDevicesAndSetUp({DEVICE_GPU}, fdef);
Device* device = device_setup.GetDevice(DEVICE_GPU);
const XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
NameAttrList function;
function.set_name("foo");
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
params.device = device;
params.function_library = device_setup.flr();
OpKernelContext ctx(¶ms, 1);
const XlaCompiler::CompilationResult* compilation_result = nullptr;
xla::PjRtLoadedExecutable* pjrt_executable = nullptr;
xla::PjRtClient* pjrt_client = nullptr;
TF_EXPECT_OK(CompileToPjRtLoadedExecutable(
ctx, platform_info, function, SampleArgsForAddXY(),
      DeviceCompileMode::kStrict, /*has_ref_vars=*/true,
      /*may_alias_resource_update=*/true, &compilation_result, &pjrt_client,
&pjrt_executable));
EXPECT_TRUE(compilation_result != nullptr);
EXPECT_TRUE(pjrt_executable != nullptr);
EXPECT_TRUE(pjrt_client != nullptr);
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/pjrt_compile_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/pjrt_compile_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
04076f27-7215-4ac2-a0ad-d0a45b77807a | cpp | tensorflow/tensorflow | build_xla_ops_pass | tensorflow/compiler/jit/build_xla_ops_pass.cc | tensorflow/compiler/jit/build_xla_ops_pass_test.cc | #include "tensorflow/compiler/jit/build_xla_ops_pass.h"
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/control_flow_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/logging_ops.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/device_util.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/cc/ops/xla_jit_ops.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
struct DebuggingOpts {
bool print_outputs;
bool check_input_numerics;
bool check_output_numerics;
};
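// Moves all outgoing edges (data and control) from `old_node` to `new_node`.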
void MoveOutgoingEdges(Graph* g, Node* old_node, Node* new_node) {
std::vector<const Edge*> out_edges(old_node->out_edges().begin(),
old_node->out_edges().end());
for (const Edge* edge : out_edges) {
g->AddEdge(new_node, edge->src_output(), edge->dst(), edge->dst_input());
g->RemoveEdge(edge);
}
}
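// Helpers that convert between a control dependency and a dummy data tensor so
// control edges can be routed through data ops such as Merge.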
Output ControlToData(const Scope& scope, Node* control) {
Output data = ops::Const(scope.WithOpName("ctrl_as_data"),
Tensor(DT_INT32, TensorShape({0})));
scope.graph()->AddControlEdge(control, data.node());
return Output(data.node());
}
Operation DataToControl(const Scope& scope, Output data) {
return Operation(
ops::Identity(scope.WithOpName("data_as_ctrl"), data).node());
}
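// Rewires each outgoing data edge of `old_node` through an _XlaMerge of the
// corresponding outputs of `old_node` and `new_node`, so consumers receive the
// value from whichever path actually ran. Optionally inserts Print and
// CheckNumerics nodes for debugging.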
void MergeOutgoingDataEdges(const Scope& s, Node* old_node, Node* new_node,
absl::string_view cluster_name,
const DebuggingOpts& debugging_opts) {
if (!s.status().ok()) {
return;
}
std::vector<Output> merged_outputs(old_node->num_outputs(), Output(nullptr));
std::vector<const Edge*> data_edges;
absl::c_copy_if(old_node->out_edges(), std::back_inserter(data_edges),
[](const Edge* e) { return !e->IsControlEdge(); });
for (const Edge* e : data_edges) {
int oidx = e->src_output();
Output merged_output = merged_outputs[oidx];
if (merged_output.node() == nullptr) {
Output new_output(new_node, oidx);
if (debugging_opts.print_outputs) {
string cpu_device = "/job:localhost/replica:0/task:0/device:CPU:0";
ops::Print print_op(s.WithOpName("print_", oidx)
.WithDevice(cpu_device)
.WithAssignedDevice(cpu_device),
new_output, {new_output},
ops::Print::Attrs{}
.Message(absl::StrCat("output ", oidx, " from ",
old_node->name(), " is "))
.FirstN(1000)
.Summarize(-1));
new_output = print_op;
}
if (debugging_opts.check_output_numerics &&
DataTypeIsFloating(new_output.type())) {
ops::CheckNumerics check_numerics_op(
s.WithOpName("check_output_", oidx)
.WithDevice(new_node->requested_device())
.WithAssignedDevice(new_node->assigned_device_name()),
new_output,
absl::StrCat("CheckNumerics failed for output ", oidx, "(",
new_output.name(), ") from cluster ", cluster_name));
new_output = check_numerics_op;
}
ops::_XlaMerge xla_merge_op(s.WithOpName("merge_oidx_", oidx),
Output(old_node, oidx), new_output);
merged_output = merged_outputs[oidx] = xla_merge_op.output;
}
Node* dst = e->dst();
int dst_idx = e->dst_input();
s.graph()->RemoveEdge(e);
s.graph()->AddEdge(merged_output.node(), merged_output.index(), dst,
dst_idx);
}
}
void MergeOutgoingControlEdges(const Scope& s, Node* old_node, Node* new_node) {
if (!s.status().ok()) {
return;
}
std::vector<const Edge*> ctrl_edges;
absl::c_copy_if(old_node->out_edges(), std::back_inserter(ctrl_edges),
[](const Edge* e) { return e->IsControlEdge(); });
if (ctrl_edges.empty()) {
return;
}
if (ctrl_edges.size() == 1 && ctrl_edges.front()->dst()->IsSink()) {
s.graph()->AddControlEdge(new_node, s.graph()->sink_node());
return;
}
Output old_ctrl_as_data = ControlToData(s, old_node);
Output new_ctrl_as_data = ControlToData(s, new_node);
ops::Merge ctrl_merge_as_data(s.WithOpName("ctrl_merge"),
{old_ctrl_as_data, new_ctrl_as_data});
Operation ctrl_merge = DataToControl(s, ctrl_merge_as_data.output);
for (const Edge* e : ctrl_edges) {
s.graph()->AddControlEdge(ctrl_merge.node(), e->dst());
s.graph()->RemoveControlEdge(e);
}
}
struct XlaClusterInfo {
std::vector<Output> constant_inputs;
std::vector<Output> non_constant_inputs;
std::vector<Output> resource_inputs;
NameAttrList function;
};
Output IncomingEdgeAsOutput(const Edge* e) {
return Output(e->src(), e->src_output());
}
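// Splits the inputs of the cluster node `n` into constant, non-constant and
// resource inputs based on the kXlaNumConstantArgs/kXlaNumResourceArgs
// attributes, and records the function the cluster calls.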
Status GetXlaClusterInfo(Node* n, XlaClusterInfo* result) {
int num_constant_inputs, num_resource_inputs;
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), kXlaNumConstantArgsAttr, &num_constant_inputs));
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), kXlaNumResourceArgsAttr, &num_resource_inputs));
if (num_constant_inputs < 0 || num_resource_inputs < 0 ||
num_constant_inputs + num_resource_inputs > n->num_inputs()) {
return errors::InvalidArgument(
"Invalid number of constant/resource arguments to XLA kernel.");
}
int num_non_constant_inputs =
n->num_inputs() - num_constant_inputs - num_resource_inputs;
std::vector<const Edge*> input_edges_vector;
TF_RETURN_IF_ERROR(n->input_edges(&input_edges_vector));
absl::Span<const Edge*> input_edges(input_edges_vector);
absl::c_transform(input_edges.subspan(0, num_constant_inputs),
std::back_inserter(result->constant_inputs),
IncomingEdgeAsOutput);
absl::c_transform(
input_edges.subspan(num_constant_inputs, num_non_constant_inputs),
std::back_inserter(result->non_constant_inputs), IncomingEdgeAsOutput);
absl::c_transform(
input_edges.subspan(num_constant_inputs + num_non_constant_inputs,
num_resource_inputs),
std::back_inserter(result->resource_inputs), IncomingEdgeAsOutput);
result->function.set_name(n->type_string());
*result->function.mutable_attr() = n->def().attr();
return absl::OkStatus();
}
Status CopyIncomingControlEdges(Graph* g, Node* from, Node* to) {
for (const Edge* e : from->in_edges()) {
if (e->IsControlEdge()) {
g->AddControlEdge(e->src(), to);
}
}
return absl::OkStatus();
}
void RemoveAllIncomingControlEdges(Graph* g, Node* n) {
std::vector<const Edge*> incoming_ctrl_edges;
absl::c_copy_if(n->in_edges(), std::back_inserter(incoming_ctrl_edges),
[](const Edge* e) { return e->IsControlEdge(); });
for (const Edge* e : incoming_ctrl_edges) {
g->RemoveControlEdge(e);
}
}
Status DeviceRequiresCompilation(const jit::DeviceInfoCache& device_info_cache,
jit::DeviceId device, bool* result) {
const XlaOpRegistry::DeviceRegistration* registration =
device_info_cache.GetCompilationDevice(device);
*result = registration->autoclustering_policy ==
XlaOpRegistry::AutoclusteringPolicy::kAlways;
return absl::OkStatus();
}
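// Replaces the function call node `n` with a StatefulPartitionedCall invoking
// the same function, preserving its incoming and outgoing edges; this is the
// TF fallback path used when the cluster is not compiled.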
absl::StatusOr<Node*> ReplaceFunctionCallWithPartitionedCall(
const GraphOptimizationPassOptions& options,
const FunctionLibraryDefinition& flib_def, Node* n, Graph* g,
const NameAttrList& func, const Scope& root) {
string config_string = options.session_options->config.SerializeAsString();
int input_count = absl::c_count_if(
n->in_edges(), [](const Edge* e) { return !e->IsControlEdge(); });
std::vector<Output> args(input_count);
for (const Edge* e : n->in_edges()) {
if (!e->IsControlEdge()) {
args[e->dst_input()] = Output(e->src(), e->src_output());
}
}
ops::StatefulPartitionedCall call(
root.WithOpName("stateful_partitioned_call"), args, n->output_types(),
func, ops::StatefulPartitionedCall::Attrs{}.ConfigProto(config_string));
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
g->AddControlEdge(e->src(), call.operation.node());
}
}
std::vector<const Edge*> edges_to_delete;
for (const Edge* e : n->out_edges()) {
edges_to_delete.push_back(e);
if (e->IsControlEdge()) {
g->AddControlEdge(call.operation.node(), e->dst());
} else {
g->AddEdge(call.operation.node(), e->src_output(), e->dst(),
e->dst_input());
}
}
for (const Edge* e : edges_to_delete) {
g->RemoveEdge(e);
}
g->RemoveNode(n);
return call.operation.node();
}
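// Infers a device for the cluster by collecting the devices assigned to nodes
// of the cluster function (and to the cluster node itself) and letting
// PickDeviceForXla choose among them.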
absl::StatusOr<jit::DeviceId> InferDeviceForCluster(
jit::DeviceInfoCache* device_info_cache, Node* n,
const string& function_name, const FunctionLibraryDefinition& flib_def) {
const FunctionDef* func_def = flib_def.Find(function_name);
TF_RET_CHECK(func_def) << "Could not find " << function_name;
jit::DeviceSet device_set;
for (const NodeDef& ndef : func_def->node_def()) {
VLOG(3) << ndef.DebugString();
if (!ndef.device().empty()) {
TF_ASSIGN_OR_RETURN(jit::DeviceId device_id,
device_info_cache->GetIdFor(ndef.device()));
device_set.Insert(device_id);
}
}
if (!n->assigned_device_name().empty()) {
TF_ASSIGN_OR_RETURN(jit::DeviceId device_id,
device_info_cache->GetIdFor(n->assigned_device_name()));
device_set.Insert(device_id);
}
TF_ASSIGN_OR_RETURN(jit::DeviceId result,
PickDeviceForXla(*device_info_cache, device_set,
true));
VLOG(2) << "For " << function_name << " PickDeviceForXla("
<< device_info_cache->DebugString(device_set) << ") -> "
<< device_info_cache->GetNameFor(result);
return result;
}
std::vector<Output> GetXlaRunArgs(const Scope& s,
const XlaClusterInfo& cluster_info,
const DebuggingOpts& debugging_opts) {
std::vector<Output> xla_run_args;
xla_run_args.reserve(cluster_info.non_constant_inputs.size() +
cluster_info.resource_inputs.size());
int input_idx = 0;
for (const Output& o : cluster_info.non_constant_inputs) {
if (debugging_opts.check_input_numerics && DataTypeIsFloating(o.type())) {
ops::CheckNumerics check_numerics_op(
s.WithOpName("check_input_", input_idx), o,
absl::StrCat("CheckNumerics failed for input ", input_idx, "(",
o.name(), ") into ", cluster_info.function.name()));
xla_run_args.push_back(check_numerics_op);
} else {
xla_run_args.push_back(o);
}
input_idx++;
}
absl::c_copy(cluster_info.resource_inputs, std::back_inserter(xla_run_args));
return xla_run_args;
}
absl::StatusOr<MemoryTypeVector> GetOutputMemoryTypes(const Scope& root,
Node* n) {
MemoryTypeVector input_mtypes, output_mtypes;
DeviceType device_type("");
TF_RETURN_IF_ERROR(
DeviceNameToDeviceType(n->assigned_device_name(), &device_type));
TF_RETURN_IF_ERROR(MemoryTypesForNode(root.graph()->op_registry(),
device_type, n->def(), &input_mtypes,
&output_mtypes));
return output_mtypes;
}
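// Reroutes the DT_INT32 inputs of `n` that live in device memory through an
// IdentityN node carrying a control dependency on `predicate_as_control`.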
Status PredicateInt32Inputs(const Scope& root, Node* n,
Operation predicate_as_control) {
std::vector<Output> int32_inputs;
std::vector<int> int32_inputs_input_idxs;
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
continue;
}
if (e->src()->output_type(e->src_output()) == DT_INT32) {
TF_ASSIGN_OR_RETURN(MemoryTypeVector source_output_mem_types,
GetOutputMemoryTypes(root, e->src()));
if (source_output_mem_types[e->src_output()] == DEVICE_MEMORY) {
int32_inputs.push_back(Output(e->src(), e->src_output()));
int32_inputs_input_idxs.push_back(e->dst_input());
}
}
}
if (int32_inputs.empty()) {
return absl::OkStatus();
}
ops::IdentityN identity_n(root.WithOpName("int32_id_n"), int32_inputs);
root.graph()->AddControlEdge(predicate_as_control.node(),
identity_n.operation.node());
for (int i = 0, end = int32_inputs.size(); i < end; i++) {
TF_RETURN_IF_ERROR(root.graph()->UpdateEdge(identity_n[i].node(), i, n,
int32_inputs_input_idxs[i]));
}
return absl::OkStatus();
}
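// Core rewrite for one cluster node: builds an _XlaCompile node and, when
// compilation is mandatory, an _XlaRun node that takes over all outgoing
// edges. Otherwise a Switch on the compilation-success output selects between
// the _XlaRun path and a fallback StatefulPartitionedCall of the original
// function.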
Status ReplaceNodeWithXlaCompileAndXlaRun(
jit::DeviceInfoCache* device_info_cache,
const GraphOptimizationPassOptions& options,
const FunctionLibraryDefinition& flib_def, bool lazy_compilation_enabled,
const DebuggingOpts& debugging_opts, Graph* g, Node* n) {
XlaClusterInfo cluster_info;
TF_RETURN_IF_ERROR(GetXlaClusterInfo(n, &cluster_info));
TF_ASSIGN_OR_RETURN(
jit::DeviceId device,
InferDeviceForCluster(device_info_cache, n, cluster_info.function.name(),
flib_def));
bool requires_compilation;
TF_RETURN_IF_ERROR(DeviceRequiresCompilation(*device_info_cache, device,
&requires_compilation));
if (!lazy_compilation_enabled) {
requires_compilation = true;
}
string device_name_str = string(device_info_cache->GetNameFor(device));
Status status;
Scope root = NewInternalScope(g, &status, nullptr)
.NewSubScope(n->name())
.WithDevice(n->requested_device())
.WithAssignedDevice(device_name_str);
ops::_XlaCompile xla_compile(root.WithOpName("xla_compile"),
cluster_info.constant_inputs,
cluster_info.non_constant_inputs,
cluster_info.resource_inputs,
requires_compilation,
cluster_info.function);
bool has_ref_attr;
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), kXlaHasReferenceVarsAttr, &has_ref_attr));
xla_compile.operation.node()->AddAttr(kXlaHasReferenceVarsAttr, has_ref_attr);
TF_RETURN_IF_ERROR(
      CopyIncomingControlEdges(g, /*from=*/n, /*to=*/xla_compile.key.node()));
std::vector<Output> xla_run_args =
GetXlaRunArgs(root, cluster_info, debugging_opts);
if (requires_compilation) {
ops::_XlaRun xla_run(root.WithOpName("xla_run"), xla_run_args,
xla_compile.key, n->output_types());
    MoveOutgoingEdges(g, /*old_node=*/n,
                      /*new_node=*/xla_run.operation.node());
g->RemoveNode(n);
} else {
ops::Switch s(root.WithOpName("predicated_compilation_key"),
xla_compile.key, xla_compile.compilation_successful);
Output predicated_compilation_key = s.output_true;
Output inverse_predicated_compilation_key = s.output_false;
ops::_XlaRun xla_run(root.WithOpName("xla_run"), xla_run_args,
predicated_compilation_key, n->output_types());
    MergeOutgoingControlEdges(root, /*old_node=*/n,
                              /*new_node=*/xla_run.operation.node());
    MergeOutgoingDataEdges(root, /*old_node=*/n,
                           /*new_node=*/xla_run.operation.node(),
                           cluster_info.function.name(), debugging_opts);
TF_RETURN_IF_ERROR(root.status());
RemoveAllIncomingControlEdges(g, n);
Operation inverse_predicate_as_control =
DataToControl(root, inverse_predicated_compilation_key);
g->AddControlEdge(inverse_predicate_as_control.node(), n);
n->ClearAttr(kXlaCompiledKernelAttr);
TF_ASSIGN_OR_RETURN(Node* const pco, ReplaceFunctionCallWithPartitionedCall(
options, flib_def, n, g,
cluster_info.function, root));
TF_RETURN_IF_ERROR(
PredicateInt32Inputs(root, pco, inverse_predicate_as_control));
}
return absl::OkStatus();
}
}
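// Pass entry point: collects all nodes marked as XLA compiled kernels
// (excluding Send/Recv and control-flow nodes) and rewrites each of them.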
Status BuildXlaOpsPass::Run(const GraphOptimizationPassOptions& options) {
Graph* graph = options.graph->get();
std::vector<Node*> xla_compiled_kernels;
absl::c_copy_if(graph->op_nodes(), std::back_inserter(xla_compiled_kernels),
[](const Node* n) {
if (n->IsSend() || n->IsRecv() || n->IsControlFlow()) {
return false;
}
return IsXlaCompiledKernel(*n);
});
bool lazy_compilation_enabled =
enable_lazy_compilation_
? *enable_lazy_compilation_
: GetBuildXlaOpsPassFlags()->tf_xla_enable_lazy_compilation;
jit::DeviceInfoCache device_info_cache;
const BuildXlaOpsPassFlags& flags = *GetBuildXlaOpsPassFlags();
DebuggingOpts debugging_opts;
debugging_opts.print_outputs = flags.tf_xla_print_cluster_outputs;
debugging_opts.check_input_numerics =
flags.tf_xla_check_cluster_input_numerics;
debugging_opts.check_output_numerics =
flags.tf_xla_check_cluster_output_numerics;
VLOG(1) << "print_outputs = " << debugging_opts.print_outputs;
VLOG(1) << "check_input_numerics = " << debugging_opts.check_input_numerics;
VLOG(1) << "check_output_numerics = " << debugging_opts.check_output_numerics;
for (Node* n : xla_compiled_kernels) {
TF_RETURN_IF_ERROR(ReplaceNodeWithXlaCompileAndXlaRun(
&device_info_cache, options, *options.flib_def,
lazy_compilation_enabled, debugging_opts, graph, n));
}
if (VLOG_IS_ON(1)) {
DumpGraphToFile("build_xla_ops", *graph, options.flib_def);
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/build_xla_ops_pass.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/compiler/jit/node_matchers.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
class BuildXlaOpsTest : public ::testing::Test {
protected:
void SetUp() override {
CHECK(DeviceFactory::AddDevices(
SessionOptions(), "/job:localhost/replica:0/task:0", &devices_)
.ok());
}
private:
std::vector<std::unique_ptr<Device>> devices_;
};
using ::tensorflow::testing::FindNodeByName;
using ::tensorflow::testing::matchers::Attr;
using ::tensorflow::testing::matchers::CtrlDeps;
using ::tensorflow::testing::matchers::Inputs;
using ::tensorflow::testing::matchers::NodeWith;
using ::tensorflow::testing::matchers::Op;
using ::tensorflow::testing::matchers::Out;
using ::testing::_;
Status BuildXlaOps(const Scope& s, const FunctionDefLibrary& fdef_lib,
std::unique_ptr<Graph>* result) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_RETURN_IF_ERROR(s.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(graph->op_registry(), fdef_lib);
static const char* kCpuDevice = "/job:localhost/replica:0/task:0/cpu:0";
for (Node* n : graph->nodes()) {
if (n->requested_device().empty()) {
n->set_assigned_device_name(kCpuDevice);
} else {
n->set_assigned_device_name(n->requested_device());
}
}
FixupSourceAndSinkEdges(graph.get());
GraphOptimizationPassWrapper wrapper;
GraphOptimizationPassOptions opt_options =
wrapper.CreateGraphOptimizationPassOptions(&graph);
opt_options.flib_def = &flib_def;
BuildXlaOpsPass pass(true);
TF_RETURN_IF_ERROR(pass.Run(opt_options));
VLOG(3) << graph->ToGraphDefDebug().DebugString();
*result = std::move(graph);
return absl::OkStatus();
}
Status MakeXlaCompiledKernel(Graph* graph, const string& callee_name,
const string& node_name, int num_constant_args,
int num_resource_args, Node** result) {
NodeDef call_node;
call_node.set_name(node_name);
call_node.set_op(callee_name);
AddNodeAttr(kXlaCompiledKernelAttr, true, &call_node);
AddNodeAttr(kXlaNumConstantArgsAttr, num_constant_args, &call_node);
AddNodeAttr(kXlaNumResourceArgsAttr, num_resource_args, &call_node);
TF_ASSIGN_OR_RETURN(*result, graph->AddNode(call_node));
return absl::OkStatus();
}
Status MakeXlaCompiledKernel(Graph* graph, const string& callee_name,
const string& node_name, Node** result) {
return MakeXlaCompiledKernel(graph, callee_name, node_name,
0, 0,
result);
}
Node* MakeWrite(const Scope& scope, Output value_to_write, const string& id) {
Output var_handle = ops::VarHandleOp(scope.WithOpName("Var_" + id), DT_FLOAT,
TensorShape({}));
ops::AssignVariableOp assign_op(scope.WithOpName("Assignee_" + id),
var_handle, value_to_write);
return assign_op.operation.node();
}
Node* MakeWrite(const Scope& scope, const string& id) {
return MakeWrite(
scope, ops::Const(scope.WithOpName("ValueToAssign" + id), 1.0f), id);
}
FunctionDefLibrary CreateFunctionDefLibWithConstFunction(const string& name) {
FunctionDefLibrary fdef_lib;
FunctionDef func = FunctionDefHelper::Create(
name, {}, {"out: float"},
{}, {FunctionDefHelper::Const("one", 1.0f)},
{{"out", "out:output:0"}});
*fdef_lib.add_function() = std::move(func);
return fdef_lib;
}
TEST_F(BuildXlaOpsTest, ControlDepsPreserved) {
const char* kXlaDeviceName = "/job:worker/replica:0/task:0/device:XLA_CPU:0";
Scope root = Scope::NewRootScope().WithDevice(kXlaDeviceName).ExitOnError();
FunctionDefLibrary fdef_lib =
CreateFunctionDefLibWithConstFunction("cluster_0");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
Node* call;
TF_ASSERT_OK(MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", &call));
call->AddAttr(kXlaHasReferenceVarsAttr, false);
call->set_requested_device(kXlaDeviceName);
Node* write_op = MakeWrite(root, "write");
write_op->AddAttr(kXlaHasReferenceVarsAttr, false);
root.graph()->AddControlEdge(call, write_op);
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph));
Node* write_op_new = FindNodeByName(graph.get(), write_op->name());
ASSERT_NE(write_op_new, nullptr);
EXPECT_THAT(write_op_new, NodeWith(CtrlDeps(NodeWith(Op("_XlaRun")))));
}
TEST_F(BuildXlaOpsTest, CleanFailureOnBogusAttr) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary fdef_lib =
CreateFunctionDefLibWithConstFunction("cluster_0");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
Node* call;
TF_ASSERT_OK(
MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", 100, 100, &call));
Node* write_op = MakeWrite(root, "write");
root.graph()->AddControlEdge(call, write_op);
std::unique_ptr<Graph> graph;
Status failure_status = BuildXlaOps(root, fdef_lib, &graph);
ASSERT_FALSE(failure_status.ok());
EXPECT_EQ(failure_status.code(), error::INVALID_ARGUMENT);
}
TEST_F(BuildXlaOpsTest, OnNonXlaDevice) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary fdef_lib =
CreateFunctionDefLibWithConstFunction("cluster_0");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
Node* call;
TF_ASSERT_OK(MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", &call));
TF_ASSERT_OK(root.DoShapeInference(call));
call->AddAttr(kXlaHasReferenceVarsAttr, false);
Node* write_op = MakeWrite(root, Output(call), "write_result");
write_op->AddAttr(kXlaHasReferenceVarsAttr, false);
auto xla_compile = NodeWith(Op("_XlaCompile"), Attr("must_compile", false));
auto predicated_compilation_key =
NodeWith(Op("Switch"), Inputs(Out(0, xla_compile), Out(1, xla_compile)));
auto xla_run =
NodeWith(Op("_XlaRun"), Inputs(Out(1, predicated_compilation_key)));
auto tf_call =
NodeWith(Op("StatefulPartitionedCall"),
CtrlDeps(NodeWith(Op("Identity"),
Inputs(Out(0, predicated_compilation_key)))));
auto merge = NodeWith(Op("_XlaMerge"), Inputs(Out(tf_call), Out(xla_run)));
auto assign_var = NodeWith(Op("AssignVariableOp"), Inputs(_, Out(merge)));
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph));
Node* write_op_new = FindNodeByName(graph.get(), write_op->name());
ASSERT_NE(write_op_new, nullptr);
EXPECT_THAT(write_op_new, assign_var);
}
TEST_F(BuildXlaOpsTest, OnXlaDevice) {
const char* kXlaDeviceName = "/job:worker/replica:0/task:0/device:XLA_CPU:0";
Scope root = Scope::NewRootScope().WithDevice(kXlaDeviceName).ExitOnError();
FunctionDefLibrary fdef_lib =
CreateFunctionDefLibWithConstFunction("cluster_0");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
Node* call;
TF_ASSERT_OK(MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", &call));
call->set_requested_device(kXlaDeviceName);
TF_ASSERT_OK(root.DoShapeInference(call));
call->AddAttr(kXlaHasReferenceVarsAttr, false);
Node* write_op = MakeWrite(root, Output(call), "write_result");
write_op->AddAttr(kXlaHasReferenceVarsAttr, false);
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph));
auto xla_op =
NodeWith(Op("_XlaRun"), Inputs(Out(NodeWith(Op("_XlaCompile")))));
auto assign_var =
NodeWith(Op("AssignVariableOp"), Inputs(Out(NodeWith()), Out(xla_op)));
Node* write_op_new = FindNodeByName(graph.get(), write_op->name());
ASSERT_NE(write_op_new, nullptr);
EXPECT_THAT(write_op_new, assign_var);
}
TEST_F(BuildXlaOpsTest, NoExtraMergeForEdgeToSink) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary fdef_lib =
CreateFunctionDefLibWithConstFunction("cluster_0");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
Node* call;
TF_ASSERT_OK(MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", &call));
call->AddAttr(kXlaHasReferenceVarsAttr, false);
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph));
Node* sink_node = graph->sink_node();
EXPECT_THAT(sink_node,
NodeWith(CtrlDeps(NodeWith(Op("_XlaRun")),
NodeWith(Op("StatefulPartitionedCall")),
NodeWith(Op("NoOp")))));
}
#ifdef GOOGLE_CUDA
FunctionDefLibrary CreateFunctionDefLibWithInt32Input(const string& name) {
FunctionDefLibrary fdef_lib;
FunctionDef func = FunctionDefHelper::Create(
name, {"in: int32"},
{"out: int32"},
{}, {{{"out"}, "Identity", {"in"}}},
{{"out", "out:output:0"}});
*fdef_lib.add_function() = std::move(func);
return fdef_lib;
}
TEST_F(BuildXlaOpsTest, NoDeviceToHostCopiesForClustersWithInt32Inputs) {
const char* kXlaDeviceName = "/job:worker/replica:0/task:0/device:GPU:0";
Scope root = Scope::NewRootScope()
.WithDevice(kXlaDeviceName)
.WithAssignedDevice(kXlaDeviceName)
.ExitOnError();
FunctionDefLibrary fdef_lib =
CreateFunctionDefLibWithInt32Input("cluster_int32");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
Node* call;
TF_ASSERT_OK(
MakeXlaCompiledKernel(root.graph(), "cluster_int32", "C", &call));
call->set_requested_device(kXlaDeviceName);
call->AddAttr(kXlaHasReferenceVarsAttr, false);
auto var =
ops::VarHandleOp(root.WithOpName("var"), DT_INT32, TensorShape({}));
auto int32_on_device =
ops::ReadVariableOp(root.WithOpName("int32_on_device"), var, DT_INT32);
root.graph()->AddEdge(int32_on_device.node(), 0, call, 0);
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph));
Node* stateful_partitioned_call_op = nullptr;
for (Node* n : graph->op_nodes()) {
if (n->type_string() == "StatefulPartitionedCall") {
ASSERT_EQ(stateful_partitioned_call_op, nullptr);
stateful_partitioned_call_op = n;
}
}
ASSERT_NE(stateful_partitioned_call_op, nullptr);
auto xla_compile = NodeWith(Op("_XlaCompile"));
auto switch_on_compilation_pred =
NodeWith(Op("Switch"), Inputs(Out(0, xla_compile), Out(1, xla_compile)));
auto ctrl_dep =
NodeWith(Op("Identity"), Inputs(Out(0, switch_on_compilation_pred)));
EXPECT_THAT(
stateful_partitioned_call_op,
NodeWith(Inputs(Out(NodeWith(Op("IdentityN"), CtrlDeps(ctrl_dep))))));
}
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/build_xla_ops_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/build_xla_ops_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
abfebcd9-00e5-45bc-b724-26719c12e44e | cpp | tensorflow/tensorflow | device_compiler_client | tensorflow/compiler/jit/device_compiler_client.cc | tensorflow/compiler/jit/device_compiler_client_test.cc | #include "tensorflow/compiler/jit/device_compiler_client.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/core/util/determinism.h"
namespace tensorflow {
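// Assembles xla::ExecutableBuildOptions from the compiler options and the
// compilation result: replica count for collectives, device ordinal, result
// layout, allocator, aliasing, logging and determinism settings.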
xla::ExecutableBuildOptions GetExecutableBuildOptions(
const XlaCompiler::Options& options,
const XlaCompiler::CompilationResult& result, int default_device_ordinal) {
xla::ExecutableBuildOptions build_options;
if (result.collective_info) {
build_options.set_num_replicas(result.collective_info->group_size);
}
if (options.device_ordinal != -1) {
build_options.set_device_ordinal(options.device_ordinal);
} else if (default_device_ordinal != -1) {
build_options.set_device_ordinal(default_device_ordinal);
}
build_options.set_result_layout(result.xla_output_shape);
build_options.set_device_allocator(options.device_allocator.get());
build_options.set_alias_passthrough_params(options.alias_passthrough_params);
build_options.mutable_debug_options()->set_xla_detailed_logging(
options.detailed_logging);
if (tensorflow::OpDeterminismRequired()) {
build_options.mutable_debug_options()->set_xla_gpu_deterministic_ops(true);
}
return build_options;
}
} | #include "tensorflow/compiler/jit/device_compiler_client.h"
#include <gtest/gtest.h>
namespace tensorflow {
namespace {
TEST(GetExecutableOptionTest, Basic) {
XlaCompiler::Options options;
options.device_ordinal = 0;
options.alias_passthrough_params = true;
options.detailed_logging = true;
XlaCompiler::CompilationResult result;
xla::Shape xla_output_shape;
result.xla_output_shape = xla_output_shape;
auto build_option =
GetExecutableBuildOptions(options, result, -1);
EXPECT_EQ(build_option.device_ordinal(), 0);
EXPECT_EQ(build_option.result_layout()->ToString(),
xla_output_shape.ToString());
EXPECT_EQ(build_option.alias_passthrough_params(), true);
EXPECT_EQ(build_option.debug_options().xla_detailed_logging(), true);
EXPECT_EQ(build_option.debug_options().xla_enable_dumping(), true);
}
TEST(GetExecutableOptionTest, DefaultDeviceOrdinal) {
XlaCompiler::Options options;
XlaCompiler::CompilationResult result;
auto build_option =
GetExecutableBuildOptions(options, result, 0);
EXPECT_EQ(build_option.device_ordinal(), 0);
}
TEST(GetExecutableOptionTest, DeviceOrdinalNotSet) {
XlaCompiler::Options options;
XlaCompiler::CompilationResult result;
auto build_option =
GetExecutableBuildOptions(options, result, -1);
EXPECT_EQ(build_option.device_ordinal(), -1);
}
TEST(GetExecutableOptionTest, DumpingWithoutDetailedLogging) {
XlaCompiler::Options options;
options.detailed_logging = false;
XlaCompiler::CompilationResult result;
auto build_option =
GetExecutableBuildOptions(options, result, -1);
EXPECT_FALSE(build_option.debug_options().xla_detailed_logging());
EXPECT_TRUE(build_option.debug_options().xla_enable_dumping());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compiler_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compiler_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0d3e2abc-aee0-4883-9b64-e40c9683c5e0 | cpp | tensorflow/tensorflow | compilability_check_util | tensorflow/compiler/jit/compilability_check_util.cc | tensorflow/compiler/jit/compilability_check_util_test.cc | #include "tensorflow/compiler/jit/compilability_check_util.h"
#include <algorithm>
#include <atomic>
#include <deque>
#include <iterator>
#include <limits>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/device_util.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/resource_operation_safety_analysis.h"
#include "tensorflow/compiler/jit/xla_activity.pb.h"
#include "tensorflow/compiler/jit/xla_activity_listener.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/compiler/tf2xla/resource_operation_table.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "xla/union_find.h"
#include "xla/util.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
bool HasResourceInput(const Node& node) {
return absl::c_count(node.input_types(), DT_RESOURCE) != 0;
}
void LogNotCompilable(const Node& node, absl::string_view reason = "") {
VLOG(3) << "Found uncompilable node " << node.name() << " (op "
<< node.type_string() << ")" << (reason.empty() ? "" : ": ")
<< reason;
}
bool IsInOutsideCompilationCluster(const Node& n) {
return n.attrs().Find(kXlaOutsideCompilationAttr) != nullptr;
}
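// Builds a call NodeDef from a function-valued attribute (e.g. "then_branch")
// so that the referenced function can be checked for compilability.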
Status MakeCallNodeFromAttribute(const Node& node, const std::string& attr_name,
NodeDef* node_def) {
const NameAttrList* name_attr;
TF_RETURN_IF_ERROR(GetNodeAttr(node.attrs(), attr_name, &name_attr));
node_def->set_op(name_attr->name());
*(node_def->mutable_attr()) = name_attr->attr();
return absl::OkStatus();
}
absl::StatusOr<std::vector<NodeDef>> MakeCallNodesFromAttribute(
const Node& node, absl::string_view attr_name,
absl::string_view call_name) {
std::vector<NameAttrList> attr_lists;
TF_RETURN_IF_ERROR(GetNodeAttr(node.attrs(), attr_name, &attr_lists));
std::vector<NodeDef> out;
out.reserve(attr_lists.size());
for (int i = 0; i < attr_lists.size(); i++) {
out.emplace_back();
NodeDef& inserted = out.back();
inserted.set_name(absl::StrCat(call_name, "_", i));
inserted.set_op(attr_lists[i].name());
*inserted.mutable_attr() = attr_lists[i].attr();
}
return out;
}
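// Scans a sorted list of values for membership; queries must be made in
// non-decreasing order so a single forward pass suffices.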
class SinglePassSearch {
public:
explicit SinglePassSearch(absl::Span<int const> values)
: current_index_(0), values_(values) {}
bool ScanForValue(int value) {
while (current_index_ < values_.size() &&
values_[current_index_] <= value) {
if (values_[current_index_] == value) {
current_index_++;
return true;
}
current_index_++;
}
return false;
}
private:
int current_index_;
const absl::Span<int const> values_;
};
}
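// Returns every uncompilable node reachable from `node`, recursing into called
// functions, keyed by the encapsulating function's short debug string.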
RecursiveCompilabilityChecker::UncompilableNodesMap
RecursiveCompilabilityChecker::FindUncompilableNodes(
const Node& node, FunctionLibraryRuntime* lib_runtime,
const std::vector<RecursiveCompilabilityChecker::StackFrame>*
node_stack_trace) const {
std::vector<StackFrameView> stack_trace;
if (node_stack_trace != nullptr) {
for (const auto& frame : *node_stack_trace) {
stack_trace.emplace_back(
StackFrameView{frame.name, frame.function_name, frame.stack_trace});
}
}
stack_trace.emplace_back(
StackFrameView{node.name(), "", node.GetStackTrace()});
RecursiveCompilabilityChecker::UncompilableNodesMap uncompilable_nodes;
IsCompilableNode(node, lib_runtime, &stack_trace,
nullptr, &uncompilable_nodes);
return uncompilable_nodes;
}
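// Returns true if an XLA kernel is registered for this node on the JIT device
// type; string Const nodes (when not allowed by the filter) and identities
// forwarding ref inputs are rejected.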
bool RecursiveCompilabilityChecker::HasXLAKernel(
const Node& node, string* uncompilable_reason) const {
if (node.type_string() == "SymbolicGradient") {
*uncompilable_reason =
"SymbolicGradient should be handled by IsCompilableCall().";
return false;
}
if (node.type_string() == "Const") {
const AttrValue* attr = node.attrs().Find("dtype");
if (!op_filter_.allow_string_consts && attr != nullptr &&
attr->type() == DT_STRING) {
*uncompilable_reason =
"Const op with type DT_STRING is not supported by XLA.";
return false;
}
}
if (HasForwardedRefInput(node)) {
VLOG(2) << "Rejecting " << node.name() << ": Identity with unsafe cast.";
*uncompilable_reason = "Identity with unsafe cast.";
return false;
}
Status s = FindKernelDef(jit_device_type_, node.def(), nullptr, nullptr);
if (!s.ok()) {
*uncompilable_reason = s.message();
return false;
}
return true;
}
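// The IsCompilable{If,Case,While} checks below recurse into the functions
// referenced by the node's branch, cond, and body attributes.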
bool RecursiveCompilabilityChecker::IsCompilableIf(
const Node& if_node, FunctionLibraryRuntime* lib_runtime,
std::vector<StackFrameView>* stack_trace,
NameAttrList* encapsulating_function,
RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes)
const {
bool is_compilable = true;
is_compilable &= ExtractNodeDefAndCheckCompilability(
if_node, "then_branch", "if_then", encapsulating_function, lib_runtime,
stack_trace, uncompilable_nodes);
if (!uncompilable_nodes && !is_compilable) return is_compilable;
is_compilable &= ExtractNodeDefAndCheckCompilability(
if_node, "else_branch", "if_else", encapsulating_function, lib_runtime,
stack_trace, uncompilable_nodes);
return is_compilable;
}
bool RecursiveCompilabilityChecker::IsCompilableCase(
const Node& case_node, FunctionLibraryRuntime* lib_runtime,
std::vector<StackFrameView>* stack_trace,
NameAttrList* encapsulating_function,
RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes)
const {
absl::StatusOr<std::vector<NodeDef>> calls =
MakeCallNodesFromAttribute(case_node, "branches", "branch");
if (!calls.ok()) {
VLOG(2) << "Rejecting node " << case_node.name() << ": "
<< "missing attribute 'branches'";
return false;
}
bool is_compilable = true;
for (const NodeDef& call : *calls) {
is_compilable &=
IsCompilableCall(call, lib_runtime, stack_trace, encapsulating_function,
uncompilable_nodes);
}
return is_compilable;
}
bool RecursiveCompilabilityChecker::IsCompilableWhile(
const Node& while_node, FunctionLibraryRuntime* lib_runtime,
std::vector<StackFrameView>* stack_trace,
NameAttrList* encapsulating_function,
RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes)
const {
bool is_compilable = true;
is_compilable &= ExtractNodeDefAndCheckCompilability(
while_node, "cond", "while_cond", encapsulating_function, lib_runtime,
stack_trace, uncompilable_nodes);
if (!uncompilable_nodes && !is_compilable) return is_compilable;
is_compilable &= ExtractNodeDefAndCheckCompilability(
while_node, "body", "while_body", encapsulating_function, lib_runtime,
stack_trace, uncompilable_nodes);
return is_compilable;
}
bool RecursiveCompilabilityChecker::ExtractNodeDefAndCheckCompilability(
const Node& node, const std::string& attr_name,
const std::string& call_name, NameAttrList* encapsulating_function,
FunctionLibraryRuntime* lib_runtime,
std::vector<StackFrameView>* stack_trace,
RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes)
const {
NodeDef call;
call.set_name(call_name);
if (!MakeCallNodeFromAttribute(node, attr_name, &call).ok()) {
const auto uncompilable_reason = absl::StrCat(
"missing '", attr_name, "' attribute from node", node.name());
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
VLOG(2) << "Rejecting node " << node.name() << ": " << uncompilable_reason
<< ".";
return false;
}
if (!IsCompilableCall(call, lib_runtime, stack_trace, encapsulating_function,
uncompilable_nodes)) {
VLOG(2) << "Rejecting node " << node.name()
<< ": can't compile : " << call.op();
return false;
}
return true;
}
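// Instantiates the called function and checks every op node in its body,
// bounding recursion at kMaxRecursionDepth.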
bool RecursiveCompilabilityChecker::IsCompilableCall(
const NodeDef& call_def, FunctionLibraryRuntime* lib_runtime,
std::vector<StackFrameView>* stack_trace,
NameAttrList* encapsulating_function,
RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes)
const {
if (stack_trace->size() > kMaxRecursionDepth) {
std::string uncompilable_reason = "function depth limit exceeded";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
VLOG(2) << "Rejecting " << call_def.op() << ": " << uncompilable_reason
<< ".";
return false;
}
FunctionLibraryRuntime::Handle handle;
Status s;
NameAttrList function;
s = NameAndAttrsFromFunctionCall(call_def, &function);
if (s.ok()) {
s = lib_runtime->Instantiate(function.name(), AttrSlice(&function.attr()),
&handle);
}
if (!s.ok()) {
std::string uncompilable_reason =
absl::StrCat("could not instantiate call: '", function.name(), "'");
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
VLOG(2) << "Rejecting " << call_def.DebugString() << ": "
<< uncompilable_reason << " : " << s;
return false;
}
auto release_handle_on_return = gtl::MakeCleanup(
[&] { TF_CHECK_OK(lib_runtime->ReleaseHandle(handle)); });
const FunctionBody* fbody = lib_runtime->GetFunctionBody(handle);
bool is_compilable = true;
for (const Node* node : fbody->graph->op_nodes()) {
stack_trace->emplace_back(
StackFrameView{node->name(), function.name(), node->GetStackTrace()});
is_compilable &= IsCompilableNode(*node, lib_runtime, stack_trace,
&function, uncompilable_nodes);
stack_trace->pop_back();
if (!uncompilable_nodes && !is_compilable) return is_compilable;
}
return is_compilable;
}
bool RecursiveCompilabilityChecker::OpIsInaccurate(const Node& node) const {
return node.type_string() == "SelfAdjointEigV2" ||
node.type_string() == "Svd";
}
bool RecursiveCompilabilityChecker::OpIsSlow(const Node& node) const {
return node.type_string() == "SelfAdjointEigV2" ||
node.type_string() == "Svd" || node.type_string() == "Qr" ||
node.type_string() == "MatrixInverse" ||
node.type_string() == "MatrixSolve" ||
node.type_string() == "ResizeBilinearGrad";
}
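// Per-node compilability check: rejects source/sink and top-level _Arg/_Retval
// nodes, ops without an XLA kernel, and any op category disallowed by the
// operation filter (stateful RNG, Stack/TensorArray, variant producers, Where,
// Unique, resource inputs in called functions, inaccurate or slow ops).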
bool RecursiveCompilabilityChecker::IsCompilableNode(
const Node& node, FunctionLibraryRuntime* lib_runtime,
std::vector<StackFrameView>* stack_trace,
NameAttrList* encapsulating_function,
RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes)
const {
auto stack_depth = stack_trace->size();
if (op_filter_.allow_outside_compiled && IsInOutsideCompilationCluster(node))
return true;
if (node.IsSource() || node.IsSink()) {
absl::string_view uncompilable_reason = "source or sink node";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (stack_depth == 1 &&
(node.type_string() == "_Arg" || node.type_string() == "_Retval")) {
absl::string_view uncompilable_reason = "top level _Arg or _Retval";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (node.attrs().Find("_scoped_allocator") ||
node.attrs().Find("_forward_from")) {
absl::string_view uncompilable_reason =
"_scoped_allocator or _forward_from attribute";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
string uncompilable_reason;
if (IsFunctionCall(*lib_runtime->GetFunctionLibraryDefinition(), node)) {
if (!IsCompilableCall(node.def(), lib_runtime, stack_trace,
encapsulating_function, uncompilable_nodes)) {
LogNotCompilable(node, "unsupported function");
return false;
}
} else if (!HasXLAKernel(node, &uncompilable_reason)) {
MaybeMarkUncompilableNode(
absl::StrCat("unsupported op: ", uncompilable_reason), *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (node.IsWhileNode() &&
!IsCompilableWhile(node, lib_runtime, stack_trace, encapsulating_function,
uncompilable_nodes)) {
LogNotCompilable(node, "unsupported while");
return false;
}
if (node.IsIfNode() &&
!IsCompilableIf(node, lib_runtime, stack_trace, encapsulating_function,
uncompilable_nodes)) {
LogNotCompilable(node, "unsupported if");
return false;
}
if (op_filter_.require_always_compilable && node.IsCaseNode() &&
!IsCompilableCase(node, lib_runtime, stack_trace, encapsulating_function,
uncompilable_nodes)) {
LogNotCompilable(node, "unsupported case");
return false;
}
if (!op_filter_.allow_stateful_rng_ops &&
IsStatefulRandomOp(node.type_string())) {
absl::string_view uncompilable_reason = "stateful random op";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (!op_filter_.allow_control_trigger && node.IsControlTrigger()) {
absl::string_view uncompilable_reason = "not allowed control trigger";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (!op_filter_.allow_eliding_assert_and_checknumerics_ops &&
IsAssertOrCheckNumerics(node.type_string())) {
absl::string_view uncompilable_reason = "Assert or CheckNumerics";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (!op_filter_.allow_collective_reduce_v2 &&
node.type_string() == "CollectiveReduceV2") {
absl::string_view uncompilable_reason = "Collective op";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (!op_filter_.allow_where_op && node.type_string() == "Where") {
absl::string_view uncompilable_reason = "Where op";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (!op_filter_.allow_unique_op && node.type_string() == "Unique") {
absl::string_view uncompilable_reason = "Unique op";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (!op_filter_.allow_ops_producing_or_consuming_variant &&
OpProducesOrConsumesVariant(node)) {
absl::string_view uncompilable_reason = "DT_VARIANT producer/consumer";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (!op_filter_.allow_stack_ops && IsStackOp(node)) {
absl::string_view uncompilable_reason = "Stack op";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (!op_filter_.allow_tensor_array_ops && IsTensorArrayOp(node)) {
absl::string_view uncompilable_reason = "TensorArray op";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (!op_filter_.allow_resource_ops_in_called_functions && stack_depth > 1 &&
HasResourceInput(node)) {
absl::string_view uncompilable_reason =
"resource variable op in called function";
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (!op_filter_.allow_inaccurate_ops && OpIsInaccurate(node)) {
absl::string_view uncompilable_reason =
"operation with numerical accuracy issues";
BroadcastOptimizationRemark(XlaOptimizationRemark::INACCURATE_OPERATION,
node.DebugString())
.IgnoreError();
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
if (!op_filter_.allow_slow_ops && OpIsSlow(node)) {
absl::string_view uncompilable_reason = "slow operation";
BroadcastOptimizationRemark(XlaOptimizationRemark::SLOW_OPERATION,
node.DebugString())
.IgnoreError();
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
return true;
}
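// Derives the operation filter for a cluster from the clustering policies in
// the device registration.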
RecursiveCompilabilityChecker::OperationFilter CreateOperationFilter(
const XlaOpRegistry::DeviceRegistration& registration) {
RecursiveCompilabilityChecker::OperationFilter op_filter;
op_filter.allow_resource_ops_in_called_functions =
registration.cluster_resource_variable_ops_unsafely;
op_filter.allow_stack_ops = registration.cluster_stack_ops;
op_filter.allow_tensor_array_ops = registration.cluster_tensor_array_ops;
op_filter.allow_stateful_rng_ops = registration.cluster_stateful_rng_ops;
op_filter.allow_control_trigger = registration.cluster_control_trigger;
op_filter.allow_eliding_assert_and_checknumerics_ops =
registration.elide_assert_and_checknumerics;
op_filter.allow_ops_producing_or_consuming_variant =
registration.cluster_variant_ops;
op_filter.allow_slow_ops = registration.cluster_slow_ops;
op_filter.allow_inaccurate_ops = registration.cluster_inaccurate_ops;
return op_filter;
}
void RecursiveCompilabilityChecker::MaybeMarkUncompilableNode(
const absl::string_view reason,
const std::vector<StackFrameView>& stack_trace,
NameAttrList* encapsulating_function,
RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes) {
if (!uncompilable_nodes) return;
UncompilableNodeInfo node_info;
node_info.uncompilable_reason = std::string(reason);
absl::c_transform(stack_trace, std::back_inserter(node_info.stack_trace),
[](const StackFrameView& stack_element) {
return StackFrame{
std::string(stack_element.name),
std::string(stack_element.function_name),
stack_element.stack_trace};
});
node_info.name = std::string(stack_trace.back().name);
auto function =
encapsulating_function ? *encapsulating_function : NameAttrList();
auto function_identifier = function.ShortDebugString();
auto it = uncompilable_nodes->find(function_identifier);
if (it == uncompilable_nodes->end()) {
std::vector<RecursiveCompilabilityChecker::UncompilableNodeInfo>
uncompilable_node_info{std::move(node_info)};
uncompilable_nodes->emplace(
std::move(function_identifier),
std::make_pair(function, std::move(uncompilable_node_info)));
} else {
it->second.second.emplace_back(std::move(node_info));
}
}
static bool HasBoolAttr(const NodeDef& node, const char* attr) {
const auto& it = node.attr().find(attr);
return it != node.attr().end() && it->second.b();
}
bool CanCreateXlaKernel(const NodeDef& node_def) {
return HasBoolAttr(node_def, kXlaMustCompileAttr);
}
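// Instantiates `function` and returns its body together with the indices of
// arguments that must be compile-time constants (found by backwards const
// analysis) and of DT_RESOURCE arguments.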
Status GetBodyAndConstantsAndResources(FunctionLibraryRuntime* flr,
const NameAttrList& function,
const FunctionBody** fbody,
std::vector<int>* constant_arg_indices,
std::vector<int>* resource_arg_indices) {
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(
flr->Instantiate(function.name(), AttrSlice(&function.attr()), &handle));
*fbody = flr->GetFunctionBody(handle);
CHECK(*fbody);
const DataTypeVector& arg_types = (*fbody)->arg_types;
std::vector<bool> const_args(arg_types.size());
TF_RETURN_IF_ERROR(
BackwardsConstAnalysis(*((*fbody)->graph), &const_args,
nullptr, flr));
for (size_t i = 0; i < const_args.size(); ++i) {
if (const_args[i]) {
constant_arg_indices->push_back(i);
}
}
resource_arg_indices->reserve(arg_types.size());
for (size_t i = 0; i < arg_types.size(); ++i) {
if (arg_types[i] == DT_RESOURCE) {
resource_arg_indices->push_back(i);
}
}
return absl::OkStatus();
}
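// Constant and resource arguments live in host memory; every other input and
// every non-resource output stays in device memory.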
tensorflow::MemoryTypeVector GetInputMemoryTypes(
const tensorflow::FunctionBody* fbody,
absl::Span<int const> constant_arg_indices,
absl::Span<int const> resource_arg_indices) {
tensorflow::MemoryTypeVector input_memory_types(fbody->arg_types.size(),
tensorflow::DEVICE_MEMORY);
SinglePassSearch constants_search(constant_arg_indices);
SinglePassSearch resources_search(resource_arg_indices);
for (size_t i = 0; i < fbody->arg_types.size(); ++i) {
if (resources_search.ScanForValue(i) || constants_search.ScanForValue(i)) {
input_memory_types[i] = tensorflow::HOST_MEMORY;
}
}
return input_memory_types;
}
tensorflow::MemoryTypeVector GetOutputMemoryTypes(
const tensorflow::FunctionBody* fbody) {
tensorflow::MemoryTypeVector output_memory_types(fbody->ret_types.size(),
tensorflow::DEVICE_MEMORY);
for (size_t i = 0; i < fbody->ret_types.size(); ++i) {
if (fbody->ret_types[i] == tensorflow::DT_RESOURCE) {
output_memory_types[i] = tensorflow::HOST_MEMORY;
}
}
return output_memory_types;
}
static auto const ops_triggering_xla_compilation =
new absl::flat_hash_set<std::string>{"XlaBroadcastHelper",
"XlaCallModule",
"XlaConv",
"XlaConvV2",
"XlaDequantize",
"XlaDot",
"XlaDotV2",
"XlaDynamicSlice",
"XlaDynamicUpdateSlice",
"XlaEinsum",
"XlaGather",
"XlaIf",
"XlaKeyValueSort",
"XlaPad",
"XlaRecv",
"XlaReduce",
"XlaReduceWindow",
"XlaReplicaId",
"XlaRngBitGenerator",
"XlaScatter",
"XlaSelectAndScatter",
"XlaSelfAdjointEig",
"XlaSend",
"XlaSharding",
"XlaSort",
"XlaSpmdFullToShardShape",
"XlaSpmdShardToFullShape",
"XlaSvd",
"XlaVariadicReduceV2",
"XlaVariadicSort",
"XlaWhile"};
bool NodeCanTriggerXlaCompilation(const NodeDef& node) {
return node.attr().find(kXlaClusterIdAttr) != node.attr().end() ||
HasBoolAttr(node, kXlaMustCompileAttr) ||
HasBoolAttr(node, kXlaCompileAttr) ||
HasBoolAttr(node, kXlaScopeAttr) ||
HasBoolAttr(node, kXlaInternalScopeAttr) ||
ops_triggering_xla_compilation->count(node.op());
}
bool CanTriggerXlaCompilation(const GraphDef& graph) {
for (const FunctionDef& function : graph.library().function()) {
for (const NodeDef& node : function.node_def()) {
if (NodeCanTriggerXlaCompilation(node)) {
return true;
}
}
}
for (const NodeDef& node : graph.node()) {
if (NodeCanTriggerXlaCompilation(node)) {
return true;
}
}
return false;
}
} | #include "tensorflow/compiler/jit/compilability_check_util.h"
#include "absl/memory/memory.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
AttrValue FuncListAttr(const absl::Span<const char* const> names) {
AttrValue attr;
for (const char* name : names) {
attr.mutable_list()->add_func()->set_name(name);
}
return attr;
}
constexpr char kFunctionalIfNodeName[] = "If";
constexpr char kFunctionalCaseNodeName[] = "Case";
constexpr char kFunctionalWhileNodeName[] = "While";
constexpr char kCompilableFunctionName[] = "CompilableFn";
constexpr char kCompilableFunctionNodeName[] = "n_c";
constexpr char kUncompilableFunctionName[] = "UncompilableFn";
constexpr char kUncompilableFunctionNodeName[] = "n_c_uncompilable";
constexpr char kUncompilableFunctionTwoName[] = "UncompilableFnTwo";
constexpr char kUncompilableFunctionNodeTwoName[] = "n_d_uncompilable";
class DummyCompilableOp : public XlaOpKernel {
public:
explicit DummyCompilableOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
ctx->SetOutput(0, ctx->Input(0));
}
};
REGISTER_OP("InputFloatOp").Output("o: float");
REGISTER_OP("CompilableOp").Input("i: float").Output("o: float");
REGISTER_XLA_OP(Name("CompilableOp").Device(DEVICE_CPU_XLA_JIT),
DummyCompilableOp);
REGISTER_OP("MissingKernel").Input("i: float").Output("o: float");
class CompilabilityCheckUtilTest : public ::testing::Test {
protected:
void SetUp() override {
XlaOpRegistry::RegisterCompilationKernels();
op_filter_.allow_resource_ops_in_called_functions = false;
op_filter_.allow_stack_ops = false;
op_filter_.allow_tensor_array_ops = false;
op_filter_.allow_stateful_rng_ops = false;
op_filter_.allow_control_trigger = false;
op_filter_.allow_eliding_assert_and_checknumerics_ops = false;
op_filter_.allow_ops_producing_or_consuming_variant = false;
op_filter_.allow_inaccurate_ops = false;
op_filter_.allow_slow_ops = false;
op_filter_.allow_outside_compiled = false;
checker_ = CreateCompilabilityChecker();
}
std::unique_ptr<RecursiveCompilabilityChecker> CreateCompilabilityChecker() {
return std::make_unique<RecursiveCompilabilityChecker>(op_filter_,
device_type_);
}
FunctionLibraryRuntime* GetFunctionLibraryRuntime() {
OptimizerOptions opts;
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
nullptr, Env::Default(), nullptr, TF_GRAPH_DEF_VERSION,
flib_def_.get(), opts);
return pflr_->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
}
RecursiveCompilabilityChecker::OperationFilter op_filter_;
DeviceType device_type_ = DeviceType(DEVICE_CPU_XLA_JIT);
std::unique_ptr<FunctionDefLibrary> func_library_ =
std::make_unique<FunctionDefLibrary>();
std::unique_ptr<FunctionLibraryDefinition> flib_def_ =
std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(),
*func_library_);
std::unique_ptr<RecursiveCompilabilityChecker> checker_;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
};
TEST_F(CompilabilityCheckUtilTest, CheckNonFunctionalNodes) {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
auto opts = builder.opts();
Node* const0 = ops::SourceOp("InputFloatOp", opts);
Node* compilable_op = ops::UnaryOp("CompilableOp", const0, opts);
Node* uncompilable_op = ops::UnaryOp("MissingKernel", compilable_op, opts);
GraphDef graph_def;
TF_EXPECT_OK(builder.ToGraphDef(&graph_def));
auto* flib_runtime = GetFunctionLibraryRuntime();
EXPECT_FALSE(checker_->IsCompilableNode(*const0, flib_runtime));
EXPECT_TRUE(checker_->IsCompilableNode(*compilable_op, flib_runtime));
EXPECT_FALSE(checker_->IsCompilableNode(*uncompilable_op, flib_runtime));
const auto uncompilable_nodes =
checker_->FindUncompilableNodes(*uncompilable_op, flib_runtime);
ASSERT_EQ(1, uncompilable_nodes.size());
auto node_info_it =
uncompilable_nodes.find(NameAttrList().ShortDebugString());
ASSERT_NE(uncompilable_nodes.end(), node_info_it);
const auto& uncompilable_nodes_inside_function = node_info_it->second.second;
ASSERT_EQ(1, uncompilable_nodes_inside_function.size());
const auto& uncompilable_node_info = uncompilable_nodes_inside_function.at(0);
EXPECT_TRUE(absl::StrContains(uncompilable_node_info.uncompilable_reason,
"unsupported op"));
ASSERT_EQ(1, uncompilable_node_info.stack_trace.size());
ASSERT_EQ("", uncompilable_node_info.stack_trace.at(0).function_name);
}
TEST_F(CompilabilityCheckUtilTest, CheckOutsideCompiledNode) {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
auto opts = builder.opts();
Node* const0 = ops::SourceOp("InputFloatOp", opts);
Node* uncompilable_op = ops::UnaryOp("MissingKernel", const0, opts);
uncompilable_op->AddAttr("_xla_outside_compilation", "0");
GraphDef graph_def;
TF_EXPECT_OK(builder.ToGraphDef(&graph_def));
auto* flib_runtime = GetFunctionLibraryRuntime();
EXPECT_FALSE(checker_->IsCompilableNode(*uncompilable_op, flib_runtime));
const auto uncompilable_nodes =
checker_->FindUncompilableNodes(*uncompilable_op, flib_runtime);
ASSERT_EQ(1, uncompilable_nodes.size());
op_filter_.allow_outside_compiled = true;
checker_ = CreateCompilabilityChecker();
EXPECT_TRUE(checker_->IsCompilableNode(*uncompilable_op, flib_runtime));
const auto uncompilable_nodes2 =
checker_->FindUncompilableNodes(*uncompilable_op, flib_runtime);
ASSERT_EQ(0, uncompilable_nodes2.size());
}
TEST_F(CompilabilityCheckUtilTest, CheckSimpleFunctionNode) {
FunctionDefLibrary flib;
*flib.add_function() = FunctionDefHelper::Define(
kUncompilableFunctionName,
{"n_a:float"},
{"n_c_uncompilable:float"},
{},
{{{kUncompilableFunctionNodeName}, "MissingKernel", {"n_a"}}});
flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(), flib));
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, flib_def_.get());
std::unique_ptr<Graph> graph(new Graph(flib_def_.get()));
Node* const0 = ops::SourceOp("InputFloatOp", builder.opts());
Node* functional_node = ops::UnaryOp(kUncompilableFunctionName, const0,
builder.opts().WithName("D"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
auto* flib_runtime = GetFunctionLibraryRuntime();
EXPECT_FALSE(checker_->IsCompilableNode(*functional_node, flib_runtime));
const auto uncompilable_nodes =
checker_->FindUncompilableNodes(*functional_node, flib_runtime);
EXPECT_EQ(1, uncompilable_nodes.size());
NameAttrList function;
function.set_name(kUncompilableFunctionName);
const auto node_info_it =
uncompilable_nodes.find(function.ShortDebugString());
ASSERT_NE(uncompilable_nodes.end(), node_info_it);
const auto& uncompilable_node_list = node_info_it->second.second;
ASSERT_EQ(1, uncompilable_node_list.size());
const auto& node_info = uncompilable_node_list.at(0);
const auto& node_stack = node_info.stack_trace;
ASSERT_EQ(2, node_stack.size());
EXPECT_EQ("D", node_stack.at(0).name);
EXPECT_EQ(kUncompilableFunctionNodeName, node_stack.at(1).name);
EXPECT_EQ(kUncompilableFunctionNodeName, node_info.name);
EXPECT_TRUE(
absl::StrContains(node_info.uncompilable_reason, "unsupported op"));
}
TEST_F(CompilabilityCheckUtilTest, CheckFunctionalWhileNode) {
FunctionDefLibrary flib;
*flib.add_function() = FunctionDefHelper::Define(
kCompilableFunctionName,
{"n_a:float", "n_b:float"},
{"n_c:float"},
{},
{{{kCompilableFunctionNodeName},
"Add",
{"n_a", "n_b"},
{{"T", DT_FLOAT}}}});
*flib.add_function() = FunctionDefHelper::Define(
kUncompilableFunctionName,
{"n_a:float"},
{"n_c_uncompilable:float"},
{},
{{{kUncompilableFunctionNodeName}, "MissingKernel", {"n_a"}}});
flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(), flib));
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, flib_def_.get());
Node* const0 = ops::SourceOp("InputFloatOp", builder.opts());
Node* input_node = ops::UnaryOp("CompilableOp", const0, builder.opts());
NameAttrList compilable;
compilable.set_name(kCompilableFunctionName);
NameAttrList uncompilable;
uncompilable.set_name(kUncompilableFunctionName);
NodeBuilder while_builder(kFunctionalWhileNodeName, "While",
builder.opts().op_registry());
while_builder.Input({input_node, input_node})
.Attr("cond", compilable)
.Attr("body", uncompilable);
builder.opts().FinalizeBuilder(&while_builder);
GraphDef graph_def;
TF_EXPECT_OK(builder.ToGraphDef(&graph_def));
std::unique_ptr<Graph> graph(new Graph(flib_def_.get()));
TF_CHECK_OK(GraphDefBuilderToGraph(builder, graph.get()));
auto while_node_it = std::find_if(
graph->nodes().begin(), graph->nodes().end(),
[&](const Node* n) { return n->name() == kFunctionalWhileNodeName; });
EXPECT_NE(while_node_it, graph->nodes().end());
auto* flib_runtime = GetFunctionLibraryRuntime();
EXPECT_FALSE(checker_->IsCompilableNode(**while_node_it, flib_runtime));
const auto uncompilable_nodes =
checker_->FindUncompilableNodes(**while_node_it, flib_runtime);
ASSERT_EQ(1, uncompilable_nodes.size());
NameAttrList function;
function.set_name(kUncompilableFunctionName);
const auto node_info_it =
uncompilable_nodes.find(function.ShortDebugString());
ASSERT_NE(uncompilable_nodes.end(), node_info_it);
const auto& uncompilable_node_list = node_info_it->second.second;
ASSERT_EQ(1, uncompilable_node_list.size());
const auto& node_info = uncompilable_node_list.at(0);
const auto& node_stack = node_info.stack_trace;
ASSERT_EQ(2, node_stack.size());
const auto& stacktrace_first_node_info = node_stack.at(0);
EXPECT_EQ(kFunctionalWhileNodeName, stacktrace_first_node_info.name);
EXPECT_EQ("", stacktrace_first_node_info.function_name);
const auto& stacktrace_second_node_info = node_stack.at(1);
EXPECT_EQ(kUncompilableFunctionNodeName, stacktrace_second_node_info.name);
EXPECT_EQ(kUncompilableFunctionName,
stacktrace_second_node_info.function_name);
EXPECT_EQ(kUncompilableFunctionNodeName, node_info.name);
EXPECT_TRUE(
absl::StrContains(node_info.uncompilable_reason, "unsupported op"));
}
TEST_F(CompilabilityCheckUtilTest, CheckFunctionalIfNode) {
FunctionDefLibrary flib;
*flib.add_function() = FunctionDefHelper::Define(
kUncompilableFunctionName,
{"n_a:float"},
{"n_c_uncompilable:float"},
{},
{{{kUncompilableFunctionNodeName}, "MissingKernel", {"n_a"}}});
*flib.add_function() = FunctionDefHelper::Define(
kUncompilableFunctionTwoName,
{"n_a:float"},
{"n_d_uncompilable:float"},
{},
{{{kUncompilableFunctionNodeTwoName}, "MissingKernel", {"n_a"}}});
NameAttrList uncompilable_fn1_attr;
uncompilable_fn1_attr.set_name(kUncompilableFunctionName);
NameAttrList uncompilable_fn2_attr;
uncompilable_fn2_attr.set_name(kUncompilableFunctionTwoName);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib));
auto predicate = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto placeholder = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> if_inputs(
{NodeBuilder::NodeOut(placeholder.node())});
Node* if_node;
TF_ASSERT_OK(
NodeBuilder(kFunctionalIfNodeName, "If", &root.graph()->flib_def())
.Input(predicate.node())
.Input(if_inputs)
.Attr("then_branch", uncompilable_fn1_attr)
.Attr("else_branch", uncompilable_fn2_attr)
.Attr("Tout", {DT_INT32})
.Finalize(root.graph(), &if_node));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(), flib));
auto if_node_it = std::find_if(
graph->nodes().begin(), graph->nodes().end(),
[&](const Node* n) { return n->name() == kFunctionalIfNodeName; });
EXPECT_NE(if_node_it, graph->nodes().end());
auto* flib_runtime = GetFunctionLibraryRuntime();
EXPECT_FALSE(checker_->IsCompilableNode(**if_node_it, flib_runtime));
const auto uncompilable_nodes =
checker_->FindUncompilableNodes(**if_node_it, flib_runtime);
ASSERT_EQ(2, uncompilable_nodes.size());
NameAttrList function_one;
function_one.set_name(kUncompilableFunctionName);
auto it = uncompilable_nodes.find(function_one.ShortDebugString());
ASSERT_NE(uncompilable_nodes.end(), it);
const auto& uncompilable_node_list = it->second.second;
ASSERT_EQ(1, uncompilable_node_list.size());
const auto& uncompilable_node_one = uncompilable_node_list.at(0);
const auto& node_one_stack = uncompilable_node_one.stack_trace;
ASSERT_EQ(2, node_one_stack.size());
const auto& node_one_stacktrace_first_node = node_one_stack.at(0);
EXPECT_EQ(kFunctionalIfNodeName, node_one_stacktrace_first_node.name);
EXPECT_EQ("", node_one_stacktrace_first_node.function_name);
const auto& stacktrace_second_node_info = node_one_stack.at(1);
EXPECT_EQ(kUncompilableFunctionNodeName, stacktrace_second_node_info.name);
EXPECT_EQ(kUncompilableFunctionName,
stacktrace_second_node_info.function_name);
EXPECT_EQ(kUncompilableFunctionNodeName, uncompilable_node_one.name);
EXPECT_TRUE(absl::StrContains(uncompilable_node_one.uncompilable_reason,
"unsupported op"));
NameAttrList function_two;
function_two.set_name(kUncompilableFunctionTwoName);
it = uncompilable_nodes.find(function_two.ShortDebugString());
ASSERT_NE(uncompilable_nodes.end(), it);
const auto& uncompilable_node_two_list = it->second.second;
ASSERT_EQ(1, uncompilable_node_two_list.size());
const auto& uncompilable_node_two = uncompilable_node_two_list.at(0);
const auto& node_two_stack = uncompilable_node_two.stack_trace;
ASSERT_EQ(2, node_two_stack.size());
const auto& node_two_stacktrace_first_node = node_two_stack.at(0);
EXPECT_EQ(kFunctionalIfNodeName, node_two_stacktrace_first_node.name);
EXPECT_EQ("", node_two_stacktrace_first_node.function_name);
const auto& node_two_stacktrace_second_node = node_two_stack.at(1);
EXPECT_EQ(kUncompilableFunctionNodeTwoName,
node_two_stacktrace_second_node.name);
EXPECT_EQ(kUncompilableFunctionTwoName,
node_two_stacktrace_second_node.function_name);
EXPECT_EQ(kUncompilableFunctionNodeTwoName, uncompilable_node_two.name);
EXPECT_TRUE(absl::StrContains(uncompilable_node_one.uncompilable_reason,
"unsupported op"));
}
TEST_F(CompilabilityCheckUtilTest, CheckFunctionalCaseNode) {
FunctionDefLibrary flib;
*flib.add_function() = FunctionDefHelper::Define(
kUncompilableFunctionName,
{"n_a:float"},
{"n_c_uncompilable:float"},
{},
{{{kUncompilableFunctionNodeName}, "MissingKernel", {"n_a"}}});
*flib.add_function() = FunctionDefHelper::Define(
kUncompilableFunctionTwoName,
{"n_a:float"},
{"n_d_uncompilable:float"},
{},
{{{kUncompilableFunctionNodeTwoName}, "MissingKernel", {"n_a"}}});
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib));
auto branch_index = ops::Placeholder(root.WithOpName("pred"), DT_INT32);
auto placeholder = ops::Placeholder(root.WithOpName("A"), DT_INT32);
  std::vector<NodeBuilder::NodeOut> inputs(
{NodeBuilder::NodeOut(placeholder.node())});
Node* case_node;
TF_ASSERT_OK(
NodeBuilder(kFunctionalCaseNodeName, "Case", &root.graph()->flib_def())
.Input(branch_index.node())
          .Input(inputs)
.Attr("branches", FuncListAttr({kUncompilableFunctionName,
kUncompilableFunctionTwoName}))
.Attr("Tout", {DT_INT32})
.Finalize(root.graph(), &case_node));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(), flib));
auto case_node_it = std::find_if(
graph->nodes().begin(), graph->nodes().end(),
[&](const Node* n) { return n->name() == kFunctionalCaseNodeName; });
EXPECT_NE(case_node_it, graph->nodes().end());
auto* flib_runtime = GetFunctionLibraryRuntime();
op_filter_.require_always_compilable = false;
checker_ = CreateCompilabilityChecker();
EXPECT_TRUE(checker_->IsCompilableNode(**case_node_it, flib_runtime));
op_filter_.require_always_compilable = true;
checker_ = CreateCompilabilityChecker();
EXPECT_FALSE(checker_->IsCompilableNode(**case_node_it, flib_runtime));
}
TEST_F(CompilabilityCheckUtilTest, TestCanNotTriggerXlaCompilation) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary library;
FunctionDef identity_func = FunctionDefHelper::Create(
"IdentityFunc",
{"x:float"},
{"res:float"},
{},
{{{"t0"}, "Identity", {"x"}, {{"T", DT_FLOAT}}}},
{{"res", "t0:output"}});
*library.add_function() = identity_func;
Output in = ops::Placeholder(root, DT_FLOAT);
NameAttrList b_name_attr;
b_name_attr.set_name("IdentityFunc");
ops::PartitionedCall call(root.WithOpName("call"), {in}, {DT_FLOAT},
b_name_attr);
GraphDef graph_def;
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(library));
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
EXPECT_FALSE(CanTriggerXlaCompilation(graph_def));
}
TEST_F(CompilabilityCheckUtilTest, TestXlaOpsCanTriggerXlaCompilation) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary library;
FunctionDef sort_func = FunctionDefHelper::Create(
"SortFunc",
{"x:float"},
{"res:float"},
{},
{{{"t0"}, "XlaSort", {"x"}, {{"T", DT_FLOAT}}}},
{{"res", "t0:output"}});
*library.add_function() = sort_func;
Output in = ops::Placeholder(root, DT_FLOAT);
NameAttrList b_name_attr;
b_name_attr.set_name("SortFunc");
ops::PartitionedCall call(root.WithOpName("call"), {in}, {DT_FLOAT},
b_name_attr);
GraphDef graph_def;
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(library));
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
EXPECT_TRUE(CanTriggerXlaCompilation(graph_def));
}
TEST_F(CompilabilityCheckUtilTest, TestCanTriggerXlaCompilation) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary library;
AttrValue true_attribute;
true_attribute.set_b(true);
FunctionDef identity_func = FunctionDefHelper::Create(
"IdentityFunc",
{"x:float"},
{"res:float"},
{},
{{{"t0"}, "Identity", {"x"}, {{"T", DT_FLOAT}}}},
{{"res", "t0:output"}});
(*identity_func.mutable_attr())[kXlaMustCompileAttr] = true_attribute;
FunctionDef call_identity = FunctionDefHelper::Create(
"CallIdentity",
{"x:float"},
{"z:float"}, {},
{{{"func_call"},
"PartitionedCall",
{"x"},
{{"Tin", DataTypeSlice({DT_FLOAT})},
{"Tout", DataTypeSlice({DT_FLOAT})},
{"f",
FunctionDefHelper::FunctionRef("IdentityRef", {{"T", DT_FLOAT}})},
{kXlaMustCompileAttr, true}}}},
{{"z", "func_call:output:0"}});
*library.add_function() = identity_func;
*library.add_function() = call_identity;
Output in = ops::Placeholder(root, DT_FLOAT);
NameAttrList b_name_attr;
b_name_attr.set_name("CallIdentity");
ops::PartitionedCall call(root.WithOpName("call"), {in}, {DT_FLOAT},
b_name_attr);
GraphDef graph_def;
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(library));
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
EXPECT_TRUE(CanTriggerXlaCompilation(graph_def));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/compilability_check_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/compilability_check_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
39252d35-6f59-4b20-8f33-d23b8995e42d | cpp | tensorflow/tensorflow | xla_cluster_util | tensorflow/compiler/jit/xla_cluster_util.cc | tensorflow/compiler/jit/xla_cluster_util_test.cc | #include "tensorflow/compiler/jit/xla_cluster_util.h"
#include <string>
#include <unordered_map>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/jit/flags.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/xla_config_registry.h"
namespace tensorflow {
const char* const kXlaClusterAttr = "_XlaCluster";
const char* const kXlaCompileTimeConstantInputsAttr =
"_XlaCompileTimeConstantInputs";
namespace {
string DescribeCycle(const xla::GraphCycles* cycles, const Graph& graph,
int src, int dst) {
int32_t max_path_size = graph.num_node_ids() + 1;
std::vector<int32> path(max_path_size);
int32_t path_size = cycles->FindPath(dst, src, max_path_size, path.data());
if (path_size == 0) {
return "";
}
auto node_name = [&graph](int node_id) {
if (!FastBoundsCheck(node_id, graph.num_node_ids())) {
return string("(null)");
}
auto* node = graph.FindNodeId(node_id);
if (node == nullptr) {
return string("(null)");
}
return node->name();
};
string description;
absl::StrAppend(&description, "Edge from ", node_name(src), " to ",
node_name(dst), " would create a cycle.\n");
path.resize(path_size);
for (int32_t node_id : path) {
string ascii_art;
if (node_id == dst) {
ascii_art = "+-> ";
} else if (node_id != src) {
ascii_art = "| ";
} else {
ascii_art = "+-- ";
}
absl::StrAppend(&description, ascii_art, node_name(node_id), "\n");
}
return description;
}
bool AlwaysForwardsRefInput(const Node& node) { return node.IsIdentity(); }
}
bool HasForwardedRefInput(const Node& node) {
if (AlwaysForwardsRefInput(node)) {
for (const Edge* incoming_edge : node.in_edges()) {
if (incoming_edge->IsControlEdge()) {
continue;
}
Node* incoming_node = incoming_edge->src();
if (IsRefType(incoming_node->output_type(incoming_edge->src_output()))) {
VLOG(2) << "Node " << node.def().ShortDebugString() << " has ref input "
<< incoming_node->name() << " " << incoming_node->type_string();
return true;
}
}
}
return false;
}
absl::StatusOr<bool> CreateCycleDetectionGraph(const Graph* graph,
xla::GraphCycles* cycles) {
for (int i = 0; i < graph->num_node_ids(); ++i) {
CHECK_EQ(i, cycles->NewNode());
}
std::vector<ControlFlowInfo> control_flow_info;
TF_RETURN_IF_ERROR(BuildControlFlowInfo(graph, &control_flow_info));
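  // Collapse each control-flow frame into one synthetic node: edges into
  // Enter nodes and out of Exit nodes are redirected through a per-frame
  // node created on first use.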
std::unordered_map<string, int> frame_nodes;
auto GetOrAddFrameNodeId = [&frame_nodes, cycles](const string& frame_name) {
int& frame_id = frame_nodes.emplace(frame_name, -1).first->second;
if (frame_id < 0) {
frame_id = cycles->NewNode();
}
return frame_id;
};
for (Edge const* edge : graph->edges()) {
if (edge->dst()->IsEnter() || edge->src()->IsExit()) {
const char* src_type = "pre-enter";
const char* dst_type = "post-exit";
int src = edge->src()->id();
int dst = edge->dst()->id();
if (edge->dst()->IsEnter()) {
const string& frame_name =
control_flow_info[edge->dst()->id()].frame_name;
dst = GetOrAddFrameNodeId(frame_name);
dst_type = "frame";
}
if (edge->src()->IsExit()) {
const string& frame_name =
control_flow_info[edge->src()->id()].frame_name;
src = GetOrAddFrameNodeId(frame_name);
src_type = "frame";
}
if (!cycles->InsertEdge(src, dst)) {
VLOG(1) << "Cycle detected when adding " << src_type << "->" << dst_type
<< " edge: " << DescribeCycle(cycles, *graph, src, dst);
return false;
}
continue;
}
if (edge->src()->IsNextIteration()) {
continue;
}
if (!cycles->InsertEdge(edge->src()->id(), edge->dst()->id())) {
return errors::Internal(
"Found cycle in graph without control flow operator during XLA "
"compilation: ",
DescribeCycle(cycles, *graph, edge->src()->id(), edge->dst()->id()));
}
}
return true;
}
std::optional<absl::string_view> GetXlaClusterForNode(const Node& node) {
const AttrValue* attr_value = node.attrs().Find(kXlaClusterAttr);
if (attr_value == nullptr) {
return std::nullopt;
}
Status s = AttrValueHasType(*attr_value, "string");
if (!s.ok()) {
return std::nullopt;
}
return attr_value->s();
}
bool HasResourceInputOrOutput(const Node& node) {
return std::find(node.input_types().begin(), node.input_types().end(),
DT_RESOURCE) != node.input_types().end() ||
std::find(node.output_types().begin(), node.output_types().end(),
DT_RESOURCE) != node.output_types().end();
}
void RemoveFromXlaCluster(NodeDef* node_def) {
node_def->mutable_attr()->erase(kXlaClusterAttr);
}
void RemoveFromXlaCluster(Node* node) { node->ClearAttr(kXlaClusterAttr); }
namespace {
typedef xla_config_registry::XlaGlobalJitLevel XlaGlobalJitLevel;
XlaGlobalJitLevel GetXlaGlobalJitLevel(
const OptimizerOptions::GlobalJitLevel& jit_level_in_session_opts) {
XlaGlobalJitLevel result;
if (jit_level_in_session_opts == OptimizerOptions::DEFAULT) {
result.single_gpu = result.general = OptimizerOptions::OFF;
} else {
result.single_gpu = result.general = jit_level_in_session_opts;
}
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
if (flags->xla_auto_jit_flag.optimization_level_single_gpu !=
OptimizerOptions::DEFAULT) {
result.single_gpu = static_cast<OptimizerOptions::GlobalJitLevel>(
flags->xla_auto_jit_flag.optimization_level_single_gpu);
}
if (flags->xla_auto_jit_flag.optimization_level_general !=
OptimizerOptions::DEFAULT) {
result.general = static_cast<OptimizerOptions::GlobalJitLevel>(
flags->xla_auto_jit_flag.optimization_level_general);
}
return result;
}
int GetGpuNumber(const string& device_name) {
DeviceNameUtils::ParsedName parsed_name;
if (!DeviceNameUtils::ParseFullName(device_name, &parsed_name)) {
return -1;
}
return parsed_name.type == DEVICE_GPU ? parsed_name.id : -1;
}
}
bool IsSingleGpuGraph(const Graph& g) {
int gpus_seen = 0;
absl::flat_hash_set<string> devices_seen;
for (Node* n : g.op_nodes()) {
if (devices_seen.contains(n->assigned_device_name())) {
continue;
}
int gpu_number = GetGpuNumber(n->assigned_device_name());
if (gpu_number != -1) {
if (++gpus_seen > 1) {
return false;
}
}
devices_seen.insert(n->assigned_device_name());
}
return gpus_seen == 1;
}
OptimizerOptions::GlobalJitLevel GetGlobalJitLevelForGraph(
const GraphOptimizationPassOptions& options) {
OptimizerOptions::GlobalJitLevel jit_level_in_session_opts =
options.session_options->config.graph_options()
.optimizer_options()
.global_jit_level();
XlaGlobalJitLevel xla_global_jit_level =
GetXlaGlobalJitLevel(jit_level_in_session_opts);
if (xla_global_jit_level.single_gpu == xla_global_jit_level.general) {
VLOG(4) << "GetGlobalJitLevelForGraph returning "
<< xla_global_jit_level.single_gpu;
return xla_global_jit_level.single_gpu;
}
OptimizerOptions::GlobalJitLevel result =
IsSingleGpuGraph(**options.graph) ? xla_global_jit_level.single_gpu
: xla_global_jit_level.general;
VLOG(4) << "GetGlobalJitLevelForGraph returning " << result;
return result;
}
bool MayCallFunction(const Node& n, const FunctionLibraryDefinition* flib_def) {
if (flib_def->Contains(n.type_string())) {
return true;
}
return absl::c_any_of(n.def().attr(),
[](const std::pair<string, AttrValue>& name_attr_pair) {
return name_attr_pair.second.has_func();
});
}
bool IsShapeConsumerOp(const Node& node) {
return node.type_string() == "Shape" || node.type_string() == "Rank" ||
node.type_string() == "Size";
}
namespace {
struct ClusterInfo {
int size;
absl::flat_hash_map<absl::string_view, int> op_histogram;
};
void HistogramMapToRepeatedOpAndCount(
protobuf::RepeatedPtrField<XlaAutoClusteringSummary::OpAndCount>* result,
const absl::flat_hash_map<absl::string_view, int>& histogram) {
for (const auto& pair : histogram) {
XlaAutoClusteringSummary::OpAndCount* new_entry = result->Add();
new_entry->set_op(std::string(pair.first));
new_entry->set_count(pair.second);
}
absl::c_sort(*result, [](const XlaAutoClusteringSummary::OpAndCount& a,
const XlaAutoClusteringSummary::OpAndCount& b) {
return a.op() < b.op();
});
}
void ClusterInfoToProtobuf(XlaAutoClusteringSummary::Cluster* result,
absl::string_view name, const ClusterInfo& info) {
result->set_name(std::string(name));
result->set_size(info.size);
HistogramMapToRepeatedOpAndCount(result->mutable_op_histogram(),
info.op_histogram);
}
}
XlaAutoClusteringSummary GetXlaAutoClusteringSummary(const Graph& graph) {
absl::flat_hash_map<absl::string_view, ClusterInfo> cluster_name_to_info;
XlaAutoClusteringSummary result;
absl::flat_hash_map<absl::string_view, int> unclustered_op_histogram;
for (Node* n : graph.nodes()) {
std::optional<absl::string_view> cluster_name = GetXlaClusterForNode(*n);
if (cluster_name) {
result.set_clustered_node_count(result.clustered_node_count() + 1);
ClusterInfo* info = &cluster_name_to_info[*cluster_name];
info->size++;
info->op_histogram[n->type_string()]++;
} else {
result.set_unclustered_node_count(result.unclustered_node_count() + 1);
unclustered_op_histogram[n->type_string()]++;
}
}
for (const auto& pair : cluster_name_to_info) {
XlaAutoClusteringSummary::Cluster* new_cluster = result.add_clusters();
ClusterInfoToProtobuf(new_cluster, pair.first, pair.second);
}
absl::c_sort(*result.mutable_clusters(),
[&](const XlaAutoClusteringSummary::Cluster& a,
const XlaAutoClusteringSummary::Cluster& b) {
return a.name() < b.name();
});
HistogramMapToRepeatedOpAndCount(result.mutable_unclustered_op_histogram(),
unclustered_op_histogram);
return result;
}
namespace {
using CallTargetListTy = absl::InlinedVector<NameAttrList, 2>;
CallTargetListTy GetCallTargetListFromNode(
const Node& n, FunctionLibraryRuntime* lib_runtime) {
const FunctionLibraryDefinition& flib_def =
*lib_runtime->GetFunctionLibraryDefinition();
if (flib_def.Find(n.type_string())) {
NameAttrList callee;
callee.set_name(n.type_string());
*callee.mutable_attr() = n.def().attr();
return {callee};
}
CallTargetListTy result;
for (const auto& name_attr_pair : n.attrs()) {
const AttrValue& attr_value = name_attr_pair.second;
if (attr_value.value_case() == AttrValue::kFunc) {
result.push_back(attr_value.func());
} else if (attr_value.value_case() == AttrValue::kList) {
result.insert(result.end(), attr_value.list().func().begin(),
attr_value.list().func().end());
}
}
return result;
}
enum class Direction { kForward, kBackward };
Status GetNodesRelatedToRefVariablesInDirection(
const Graph& graph, FunctionLibraryRuntime* lib_runtime,
Direction direction, int depth, absl::flat_hash_set<Node*>* result);
absl::StatusOr<bool> DoesAnyCalleeHaveRefNodes(
const CallTargetListTy& call_target_list,
FunctionLibraryRuntime* lib_runtime, Direction direction, int depth) {
const int kMaxDepth = 10;
if (depth == kMaxDepth && !call_target_list.empty()) {
return true;
}
absl::flat_hash_set<Node*> callee_ref_nodes;
for (const NameAttrList& call_target : call_target_list) {
const OpRegistrationData* op_reg;
if (OpRegistry::Global()->LookUp(call_target.name(), &op_reg).ok()) {
const OpDef& op = op_reg->op_def;
if (absl::c_any_of(op.output_arg(), [](const OpDef::ArgDef arg) {
return arg.is_ref();
})) {
return true;
}
continue;
}
callee_ref_nodes.clear();
FunctionLibraryRuntime::Handle handle;
if (!lib_runtime
->Instantiate(call_target.name(), AttrSlice(&call_target.attr()),
&handle)
.ok()) {
VLOG(2) << "Could not find " << call_target.name()
<< " in the function library.";
return true;
}
auto release_handle_on_return = gtl::MakeCleanup(
[&] { TF_CHECK_OK(lib_runtime->ReleaseHandle(handle)); });
const FunctionBody* fbody = lib_runtime->GetFunctionBody(handle);
TF_RETURN_IF_ERROR(GetNodesRelatedToRefVariablesInDirection(
*fbody->graph, lib_runtime, direction, depth + 1, &callee_ref_nodes));
if (!callee_ref_nodes.empty()) {
return true;
}
}
return false;
}
Status GetNodesRelatedToRefVariablesInDirection(
const Graph& graph, FunctionLibraryRuntime* lib_runtime,
Direction direction, int depth, absl::flat_hash_set<Node*>* result) {
std::vector<Node*> nodes_in_order;
if (direction == Direction::kForward) {
GetReversePostOrder(graph, &nodes_in_order,
NodeComparatorName());
} else {
GetPostOrder(graph, &nodes_in_order,
NodeComparatorName());
}
size_t old_result_size;
int iterations = 0;
const int kMaxIterations = 10 * 1000;
std::vector<bool> callee_has_ref_nodes_cache;
callee_has_ref_nodes_cache.resize(graph.num_node_ids());
auto does_callee_have_ref_nodes = [&](Node* n) -> absl::StatusOr<bool> {
if (iterations == 1) {
TF_ASSIGN_OR_RETURN(
bool callee_has_ref_nodes,
DoesAnyCalleeHaveRefNodes(GetCallTargetListFromNode(*n, lib_runtime),
lib_runtime, direction, depth));
callee_has_ref_nodes_cache[n->id()] = callee_has_ref_nodes;
return callee_has_ref_nodes;
} else {
return {callee_has_ref_nodes_cache[n->id()]};
}
};
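  // Iterate to a fixed point: a node joins `result` if a neighbor in the
  // traversal direction is already in `result`, if it produces ref-typed
  // outputs (forward direction only), or if a function it calls contains
  // ref nodes.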
do {
TF_RET_CHECK(iterations++ < kMaxIterations) << "infinite loop?";
old_result_size = result->size();
for (Node* n : nodes_in_order) {
if (n->IsSource() || n->IsSink()) {
continue;
}
bool inserted_n = false;
const EdgeSet& edges =
direction == Direction::kForward ? n->in_edges() : n->out_edges();
for (const Edge* e : edges) {
if (result->contains(direction == Direction::kForward ? e->src()
: e->dst())) {
result->insert(n);
inserted_n = true;
break;
}
}
if (inserted_n) {
continue;
}
if (direction == Direction::kForward &&
absl::c_any_of(n->output_types(), IsRefType)) {
result->insert(n);
continue;
}
TF_ASSIGN_OR_RETURN(bool callee_has_ref_nodes,
does_callee_have_ref_nodes(n));
if (callee_has_ref_nodes) {
result->insert(n);
continue;
}
}
} while (result->size() != old_result_size);
VLOG(2) << "# iterations = " << iterations;
return absl::OkStatus();
}
void SortControlInputs(GraphDef* gdef) {
int64_t num_nodes = gdef->node_size();
for (int64_t i = 0; i < num_nodes; ++i) {
NodeDef* node = gdef->mutable_node(i);
std::stable_sort(node->mutable_input()->begin(),
node->mutable_input()->end(),
[](const string& a, const string& b) {
bool a_is_control = absl::StartsWith(a, "^");
bool b_is_control = absl::StartsWith(b, "^");
return (!a_is_control && b_is_control) ||
(a_is_control && b_is_control && a < b);
});
}
}
}
absl::StatusOr<absl::flat_hash_set<Node*>> GetNodesRelatedToRefVariables(
const Graph& graph, FunctionLibraryRuntime* lib_runtime) {
absl::flat_hash_set<Node*> result;
TF_RETURN_IF_ERROR(GetNodesRelatedToRefVariablesInDirection(
graph, lib_runtime, Direction::kForward, 0, &result));
TF_RETURN_IF_ERROR(GetNodesRelatedToRefVariablesInDirection(
graph, lib_runtime, Direction::kBackward, 0, &result));
VLOG(1) << "GetNodesRelatedToRefVariables() found " << result.size()
<< " nodes";
return result;
}
absl::StatusOr<std::string> SerializeGraphDeterministic(const Graph& graph) {
GraphDef def;
graph.ToGraphDef(&def);
SortControlInputs(&def);
std::string s;
if (!SerializeToStringDeterministic(def, &s)) {
return errors::Internal("Failed to serialize graphdef.");
}
return s;
}
absl::StatusOr<uint64> FingerprintGraph(const Graph& graph) {
TF_ASSIGN_OR_RETURN(std::string serialized,
SerializeGraphDeterministic(graph));
return Hash64(serialized.data(), serialized.size());
}
REGISTER_XLA_CONFIG_GETTER(GetXlaGlobalJitLevel);
} | #include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "absl/algorithm/container.h"
#include "absl/strings/str_join.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
TEST(CreateCycleDetectionGraph, ConnectivityThroughEnterExitRegion) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
Output enter =
ops::internal::Enter(root.WithOpName("enter"), a, "only_frame");
Output exit = ops::internal::Exit(root.WithOpName("exit"), enter);
Output b = ops::Add(root.WithOpName("b"), a, exit);
FixupSourceAndSinkEdges(root.graph());
xla::GraphCycles cycles;
TF_ASSERT_OK(CreateCycleDetectionGraph(root.graph(), &cycles).status());
EXPECT_FALSE(cycles.CanContractEdge(a.node()->id(), b.node()->id()));
}
TEST(CreateCycleDetectionGraph, ConnectivityThroughMultipleEnterExitRegions) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
Output enter_0 =
ops::internal::Enter(root.WithOpName("enter_0"), a, "frame_0");
Output exit_0 = ops::internal::Exit(root.WithOpName("exit_0"), enter_0);
Output enter_1 =
ops::internal::Enter(root.WithOpName("enter_1"), a, "frame_1");
Output exit_1 = ops::internal::Exit(root.WithOpName("exit_1"), enter_1);
Output b = ops::Add(root.WithOpName("b"), a, exit_1);
FixupSourceAndSinkEdges(root.graph());
xla::GraphCycles cycles;
TF_ASSERT_OK(CreateCycleDetectionGraph(root.graph(), &cycles).status());
EXPECT_FALSE(cycles.CanContractEdge(a.node()->id(), b.node()->id()));
}
TEST(CreateCycleDetectionGraph, ReachingEnterExit) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
Output enter_0 =
ops::internal::Enter(root.WithOpName("enter_0"), a, "frame_0");
Output exit_0 = ops::internal::Exit(root.WithOpName("exit_0"), enter_0);
Output add = ops::Add(root.WithOpName("add"), exit_0, exit_0);
Output enter_1 =
ops::internal::Enter(root.WithOpName("enter_1"), add, "frame_0");
Output exit_1 = ops::internal::Exit(root.WithOpName("exit_1"), enter_1);
FixupSourceAndSinkEdges(root.graph());
xla::GraphCycles cycles;
TF_ASSERT_OK_AND_ASSIGN(bool ok,
CreateCycleDetectionGraph(root.graph(), &cycles));
EXPECT_FALSE(ok);
}
const char* kCPU0 = "/job:localhost/replica:0/task:0/device:CPU:0";
const char* kGPU0 = "/job:localhost/replica:0/task:0/device:GPU:0";
const char* kGPU1 = "/job:localhost/replica:0/task:0/device:GPU:1";
TEST(IsSingleGpuGraph, ReturnsTrue) {
Scope root = Scope::NewRootScope().WithAssignedDevice(kGPU0).ExitOnError();
Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
Output b = ops::Add(root.WithOpName("b"), a, a);
Output c = ops::Add(root.WithOpName("c"), b, b);
FixupSourceAndSinkEdges(root.graph());
EXPECT_TRUE(IsSingleGpuGraph(*root.graph()));
}
TEST(IsSingleGpuGraph, ReturnsFalseForCpuGraph) {
Scope root = Scope::NewRootScope().WithAssignedDevice(kCPU0).ExitOnError();
Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
Output b = ops::Add(root.WithOpName("b"), a, a);
Output c = ops::Add(root.WithOpName("c"), b, b);
FixupSourceAndSinkEdges(root.graph());
EXPECT_FALSE(IsSingleGpuGraph(*root.graph()));
}
TEST(IsSingleGpuGraph, ReturnsFalseForMultiGpuGraph) {
Scope root = Scope::NewRootScope().WithAssignedDevice(kGPU0).ExitOnError();
Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
Output b = ops::Add(root.WithOpName("b").WithAssignedDevice(kGPU1), a, a);
Output c = ops::Add(root.WithOpName("c"), b, b);
FixupSourceAndSinkEdges(root.graph());
EXPECT_FALSE(IsSingleGpuGraph(*root.graph()));
}
absl::StatusOr<std::vector<string>> GetNodesRelatedToRefVarsSorted(
const Scope& scope, FunctionLibraryDefinition* flib_def = nullptr) {
FunctionDefLibrary flib;
FunctionLibraryDefinition flib_def_local(OpRegistry::Global(), flib);
if (flib_def == nullptr) {
flib_def = &flib_def_local;
}
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(
nullptr, Env::Default(), nullptr, TF_GRAPH_DEF_VERSION,
flib_def, OptimizerOptions{}));
FunctionLibraryRuntime* lib_runtime =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
TF_ASSIGN_OR_RETURN(absl::flat_hash_set<Node*> nodes_related_to_ref_vars,
GetNodesRelatedToRefVariables(*graph, lib_runtime));
std::vector<string> names;
absl::c_transform(nodes_related_to_ref_vars, std::back_inserter(names),
[](Node* n) { return n->name(); });
absl::c_sort(names);
return names;
}
void CreateSubgraphTouchingRefVar(const Scope& s) {
Output variable =
ops::Variable(s.WithOpName("variable"), PartialTensorShape{}, DT_FLOAT);
Output read = ops::Identity(s.WithOpName("read_ref_var"), variable);
Output neg = ops::Negate(s.WithOpName("negate_ref"), read);
Output add = ops::Add(s.WithOpName("add_ref"), neg, neg);
Output constant =
ops::Const(s.WithOpName("constant_ref"), Input::Initializer(0.0));
s.graph()->AddControlEdge(constant.node(), variable.node());
}
void CreateSubgraphNotTouchingRefVar(const Scope& s) {
Output constant =
ops::Const(s.WithOpName("constant_normal"), Input::Initializer(0.0));
Output neg = ops::Negate(s.WithOpName("negate_normal"), constant);
Output add = ops::Add(s.WithOpName("add_normal"), neg, neg);
}
void CreateSubgraphCallingFunctionWithRefVar(const Scope& s) {
NameAttrList ref_float_function;
ref_float_function.set_name("RefFloatFn");
ops::PartitionedCall call(s.WithOpName("RefFloat"), {absl::Span<Input>{}},
{DT_FLOAT}, ref_float_function);
Output constant =
ops::Const(s.WithOpName("constant_ref_pco"), Input::Initializer(0.0));
s.graph()->AddControlEdge(call.operation.node(), constant.node());
}
void CreateSubgraphCallingFunctionWithoutRefVar(const Scope& s) {
NameAttrList regular_float_function;
regular_float_function.set_name("RegularFloatFn");
ops::PartitionedCall call(s.WithOpName("RegularFloat"), {absl::Span<Input>{}},
{DT_FLOAT}, regular_float_function);
Output constant =
ops::Const(s.WithOpName("constant_normal_pco"), Input::Initializer(0.0));
s.graph()->AddControlEdge(call.operation.node(), constant.node());
}
void AddRefFunctionFunctionDef(FunctionDefLibrary* fdef_lib) {
FunctionDef make_ref_float = FunctionDefHelper::Define(
"RefFloatFn", {}, {"r:float"}, {},
{{{"var"},
"VariableV2",
{},
{{"dtype", DT_FLOAT}, {"shape", TensorShape({})}}},
{{"r"}, "Identity", {"var"}, {{"T", DT_FLOAT}}}});
*fdef_lib->add_function() = make_ref_float;
}
void AddRegularFunctionFunctionDef(FunctionDefLibrary* fdef_lib) {
Tensor seven(DT_FLOAT, {});
seven.scalar<float>()() = 7;
FunctionDef make_regular_float = FunctionDefHelper::Define(
"RegularFloatFn", {}, {"r:float"}, {},
{{{"r"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", seven}}}});
*fdef_lib->add_function() = make_regular_float;
}
TEST(NodesRelatedToRefVariables, Basic) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary fdef_lib;
CreateSubgraphTouchingRefVar(root);
CreateSubgraphNotTouchingRefVar(root);
AddRefFunctionFunctionDef(&fdef_lib);
CreateSubgraphCallingFunctionWithRefVar(root);
AddRegularFunctionFunctionDef(&fdef_lib);
CreateSubgraphCallingFunctionWithoutRefVar(root);
FunctionLibraryDefinition flib_def(OpRegistry::Global(), fdef_lib);
TF_ASSERT_OK_AND_ASSIGN(std::vector<string> names,
GetNodesRelatedToRefVarsSorted(root, &flib_def));
std::vector<string> expected({
"RefFloat",
"add_ref",
"constant_ref",
"constant_ref_pco",
"negate_ref",
"read_ref_var",
"variable",
});
EXPECT_EQ(names, expected);
}
Status MakeLoop(Scope s, Output init_value, absl::string_view loop_name) {
s = s.NewSubScope(std::string(loop_name));
ops::internal::Enter enter(s.WithOpName("init_value"), init_value, loop_name);
ops::Merge merge(s.WithOpName("merge"), {init_value, init_value});
Output next_iteration =
ops::NextIteration(s.WithOpName("next_itr"), merge.output);
return s.graph()->UpdateEdge(next_iteration.node(), 0, merge.output.node(),
1);
}
TEST(NodesRelatedToRefVariables, Cycles) {
Scope root = Scope::NewRootScope().ExitOnError();
Output variable = ops::Variable(root.WithOpName("variable"),
PartialTensorShape{}, DT_FLOAT);
TF_ASSERT_OK(
MakeLoop(root, ops::Identity(root.WithOpName("read_ref_var"), variable),
"ref_loop"));
TF_ASSERT_OK(MakeLoop(
root, ops::Const(root.WithOpName("constant"), Input::Initializer(0.0)),
"normal_loop"));
TF_ASSERT_OK_AND_ASSIGN(std::vector<string> names,
GetNodesRelatedToRefVarsSorted(root));
std::vector<string> expected({"read_ref_var", "ref_loop/init_value",
"ref_loop/merge", "ref_loop/next_itr",
"variable"});
EXPECT_EQ(names, expected);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_cluster_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_cluster_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b00eb5a1-792c-4449-99f0-8105011eaef0 | cpp | tensorflow/tensorflow | encapsulate_util | tensorflow/compiler/jit/encapsulate_util.cc | tensorflow/compiler/jit/encapsulate_util_test.cc | #include "tensorflow/compiler/jit/encapsulate_util.h"
#include <algorithm>
#include <iterator>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/jit/shape_inference.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
using tsl::StatusOr;
namespace tensorflow {
namespace {
std::optional<string> GetStringAttr(const Node& n, const string& attr_name) {
auto attr = n.attrs().Find(attr_name);
if (!attr) {
return std::nullopt;
} else {
return attr->s();
}
}
template <typename T>
Status AppendToListAttr(Node* n, const string& attr_name, const string& value) {
std::vector<T> attr_value;
Status s = GetNodeAttr(n->attrs(), attr_name, &attr_value);
if (!s.ok() && s.code() != error::NOT_FOUND) {
return s;
}
n->ClearAttr(attr_name);
attr_value.push_back(value);
n->AddAttr(attr_name, attr_value);
return absl::OkStatus();
}
template <typename T>
void ReplaceAttr(Node* n, const string& attr_name, const T& value) {
n->ClearAttr(attr_name);
n->AddAttr(attr_name, value);
}
Status PreprocessControlEdgesBetweenOutsideCompilations(
Graph* g, const string& outside_compilation_attr_name) {
std::vector<const Edge*> edges_to_remove;
for (const Edge* e : g->edges()) {
if (!e->IsControlEdge()) {
continue;
}
auto src_outside_compilation =
GetStringAttr(*e->src(), outside_compilation_attr_name);
auto dst_outside_compilation =
GetStringAttr(*e->dst(), outside_compilation_attr_name);
if (src_outside_compilation && dst_outside_compilation) {
if (*src_outside_compilation != *dst_outside_compilation) {
edges_to_remove.push_back(e);
TF_RETURN_IF_ERROR(AppendToListAttr<string>(
e->dst(), kXlaControlDependenciesWithinXlaClusterAttrName,
e->src()->name()));
}
} else if (src_outside_compilation && !dst_outside_compilation) {
ReplaceAttr(e->src(), kXlaConnectedToXlaComputationAttrName, true);
} else if (!src_outside_compilation && dst_outside_compilation) {
ReplaceAttr(e->dst(), kXlaConnectedFromXlaComputationAttrName, true);
}
}
for (auto e : edges_to_remove) {
g->RemoveEdge(e);
}
return absl::OkStatus();
}
Status PreprocessDataEdgesBetweenOutsideCompilations(
Graph* g, const string& outside_compilation_attr_name) {
struct EdgeInfo {
int dst_input, dst_node_id;
};
std::vector<EdgeInfo> edges;
for (const Edge* e : g->edges()) {
if (e->IsControlEdge()) {
continue;
}
auto src_outside_compilation =
GetStringAttr(*e->src(), outside_compilation_attr_name);
auto dst_outside_compilation =
GetStringAttr(*e->dst(), outside_compilation_attr_name);
if (src_outside_compilation && dst_outside_compilation &&
*src_outside_compilation != *dst_outside_compilation) {
edges.push_back(EdgeInfo{e->dst_input(), e->dst()->id()});
VLOG(4) << "Oc -> oc edge: " << e->DebugString();
}
}
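  // Placeholders are keyed by (source node name, source output) so that
  // multiple outside-compilation edges from the same output reuse a single
  // Placeholder node.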
std::map<std::pair<string, int>, Node*> placeholders;
for (int i = 0, end = edges.size(); i < end; i++) {
Node* dst = g->FindNodeId(edges[i].dst_node_id);
const Edge* e;
TF_RETURN_IF_ERROR(dst->input_edge(edges[i].dst_input, &e));
Node* src = e->src();
int src_output = e->src_output(), dst_input = e->dst_input();
g->RemoveEdge(e);
string new_name =
absl::StrCat(src->name(), "_oc_to_oc_placeholder_", src_output);
auto placeholder_index = std::make_pair(src->name(), src_output);
auto iter = placeholders.find(placeholder_index);
Node* placeholder_node;
if (iter == placeholders.end()) {
NodeDefBuilder placeholder_builder(new_name, "Placeholder");
placeholder_builder.Attr("dtype", src->output_type(src_output));
string outside_compilation_attr;
TF_RETURN_IF_ERROR(GetNodeAttr(dst->attrs(),
outside_compilation_attr_name,
&outside_compilation_attr));
placeholder_builder.Attr(outside_compilation_attr_name,
outside_compilation_attr);
placeholder_builder.Attr(kOutsideCompilationOriginalNodeAttrName,
src->name());
placeholder_builder.Attr(kOutsideCompilationSrcOutputAttrName,
src_output);
NodeDef placeholder_def;
TF_RETURN_IF_ERROR(placeholder_builder.Finalize(&placeholder_def));
TF_ASSIGN_OR_RETURN(placeholder_node, g->AddNode(placeholder_def));
placeholders[placeholder_index] = placeholder_node;
} else {
placeholder_node = iter->second;
}
g->AddEdge(placeholder_node, 0, dst, dst_input);
NodeDef new_def = dst->def();
*new_def.mutable_input(dst_input) = placeholder_node->name();
TF_ASSIGN_OR_RETURN(Node * dst_replace_node, ReplaceNode(g, dst, new_def));
for (int j = i + 1, end = edges.size(); j < end; j++) {
if (edges[j].dst_node_id == edges[i].dst_node_id) {
edges[j].dst_node_id = dst_replace_node->id();
}
}
}
return absl::OkStatus();
}
Status PostprocessDataEdgesBetweenOutsideCompilations(
Graph* g, const string& outside_compilation_attr_name) {
std::vector<Node*> placeholder_nodes;
for (Node* n : g->nodes()) {
if (n->type_string() == "Placeholder" &&
HasNodeAttr(n->def(), kOutsideCompilationOriginalNodeAttrName)) {
placeholder_nodes.push_back(n);
}
}
auto node_name_index = g->BuildNodeNameIndex();
for (auto n : placeholder_nodes) {
string node_name;
int node_src_output;
TF_RETURN_IF_ERROR(GetNodeAttr(
n->attrs(), kOutsideCompilationOriginalNodeAttrName, &node_name));
TF_RETURN_IF_ERROR(GetNodeAttr(
n->attrs(), kOutsideCompilationSrcOutputAttrName, &node_src_output));
auto iter = node_name_index.find(node_name);
if (iter == node_name_index.end()) {
return errors::Internal(
"Cannot find original node for oc -> host placeholder node ",
node_name);
}
Node* original_node = iter->second;
std::vector<const Edge*> control_edges;
std::vector<OutEdgeInfo> data_edges;
for (auto e : n->out_edges()) {
if (e->IsControlEdge()) {
control_edges.push_back(e);
} else {
data_edges.push_back({e->dst(), e->src_output(), e->dst_input()});
}
}
for (const Edge* e : control_edges) {
g->AddControlEdge(original_node, e->dst());
g->RemoveEdge(e);
}
for (int i = 0, end = data_edges.size(); i < end; i++) {
Node* dst = data_edges[i].dst;
NodeDef new_def = dst->def();
int dst_input = data_edges[i].dst_input;
*new_def.mutable_input(dst_input) =
absl::StrCat(original_node->name(), ":", node_src_output);
TF_ASSIGN_OR_RETURN(Node * replace_node, ReplaceNode(g, dst, new_def));
const Edge* edge_to_replace = nullptr;
TF_RETURN_IF_ERROR(replace_node->input_edge(dst_input, &edge_to_replace));
g->RemoveEdge(edge_to_replace);
g->AddEdge(original_node, node_src_output, replace_node, dst_input);
for (int j = i + 1, end = data_edges.size(); j < end; j++) {
if (data_edges[j].dst == dst) {
data_edges[j].dst = replace_node;
}
}
node_name_index[replace_node->name()] = replace_node;
}
g->RemoveNode(n);
}
return absl::OkStatus();
}
Status PostprocessControlEdgesBetweenOutsideCompilations(
Graph* g, const string& outside_compilation_attr_name) {
auto node_name_index = g->BuildNodeNameIndex();
for (Node* n : g->nodes()) {
std::vector<string> control_deps;
Status s =
GetNodeAttr(n->attrs(), kXlaControlDependenciesWithinXlaClusterAttrName,
&control_deps);
if (!s.ok()) {
if (s.code() != error::NOT_FOUND) {
return s;
} else {
continue;
}
} else {
n->ClearAttr(kXlaControlDependenciesWithinXlaClusterAttrName);
for (const string& control_input : control_deps) {
auto iter = node_name_index.find(control_input);
if (iter == node_name_index.end()) {
return errors::Internal("Cannot find original node for ",
control_input);
}
g->AddControlEdge(iter->second, n);
}
}
}
return absl::OkStatus();
}
}
const char kXlaInferredShapesAttrName[] = "_xla_inferred_shapes";
const char kXlaConnectedToXlaComputationAttrName[] =
"_xla_connected_to_xla_computation";
const char kXlaConnectedFromXlaComputationAttrName[] =
"_xla_connected_from_xla_computation";
const char kOutsideCompilationOriginalNodeAttrName[] =
"_xla_oc_to_oc_node_name";
const char kOutsideCompilationSrcOutputAttrName[] = "_xla_oc_to_oc_src_output";
const char kXlaControlDependenciesWithinXlaClusterAttrName[] =
"_xla_control_dependencies_within_xla_cluster";
const char kXlaIsLiftedArgAttrName[] = "_xla_is_lifted_arg";
const char kXlaLiftedArgOutsideCompilationAttrName[] = "_xla_lifted_arg_oc";
const char kXlaOutsideCompilationInputsAttrName[] = "_xla_oc_inputs";
const char kXlaIsPlaceholderForArg[] = "_xla_is_placeholder_for_arg";
Status PerformStaticShapeInferenceBeforeEncapsulation(Graph* g) {
std::map<int, InferredShape> arg_shapes;
GraphShapeInfo shape_info;
TF_RETURN_IF_ERROR(
InferShapes(g, arg_shapes, nullptr, &shape_info));
auto node_name_index = g->BuildNodeNameIndex();
for (auto iter : shape_info) {
std::vector<PartialTensorShape> output_shapes;
std::transform(iter.second.begin(), iter.second.end(),
std::back_inserter(output_shapes),
[](const InferredShape& inferred_shape) {
return inferred_shape.shape;
});
Node* n = node_name_index[iter.first];
n->AddAttr(kXlaInferredShapesAttrName, output_shapes);
}
return absl::OkStatus();
}
absl::StatusOr<
std::unique_ptr<absl::flat_hash_map<string, std::vector<string>>>>
OutsideCompilationClusterDependencies(
const Graph* g, const string& outside_compilation_attr_name) {
auto cluster_deps = std::make_unique<
absl::flat_hash_map<string, absl::flat_hash_set<string>>>();
for (const Edge* e : g->edges()) {
auto src_outside_compilation =
GetStringAttr(*e->src(), outside_compilation_attr_name);
auto dst_outside_compilation =
GetStringAttr(*e->dst(), outside_compilation_attr_name);
if (src_outside_compilation && dst_outside_compilation &&
*src_outside_compilation != *dst_outside_compilation) {
auto dst_deps_it = cluster_deps->find(*dst_outside_compilation);
if (dst_deps_it == cluster_deps->end()) {
cluster_deps->insert(std::make_pair(
*dst_outside_compilation,
absl::flat_hash_set<string>({*src_outside_compilation})));
} else {
dst_deps_it->second.insert(*src_outside_compilation);
}
}
}
auto cluster_deps_ordered =
std::make_unique<absl::flat_hash_map<string, std::vector<string>>>();
for (auto it = cluster_deps->begin(); it != cluster_deps->end(); it++) {
std::vector<string> ordered_deps(it->second.begin(), it->second.end());
std::sort(ordered_deps.begin(), ordered_deps.end());
cluster_deps_ordered->insert(std::make_pair(it->first, ordered_deps));
}
return std::move(cluster_deps_ordered);
}
Status PreprocessEdgesBetweenOutsideCompilations(
Graph* g, const string& outside_compilation_attr_name) {
std::vector<const Edge*> edges_to_remove;
for (const Edge* e : g->source_node()->out_edges()) {
if (HasNodeAttr(e->dst()->def(), outside_compilation_attr_name)) {
edges_to_remove.push_back(e);
}
}
for (const Edge* e : g->sink_node()->in_edges()) {
if (HasNodeAttr(e->src()->def(), outside_compilation_attr_name)) {
edges_to_remove.push_back(e);
}
}
for (auto e : edges_to_remove) {
g->RemoveEdge(e);
}
TF_RETURN_IF_ERROR(PreprocessControlEdgesBetweenOutsideCompilations(
g, outside_compilation_attr_name));
TF_RETURN_IF_ERROR(PreprocessDataEdgesBetweenOutsideCompilations(
g, outside_compilation_attr_name));
return absl::OkStatus();
}
Status PostprocessEdgesBetweenOutsideCompilations(
Graph* g, const string& outside_compilation_attr_name) {
TF_RETURN_IF_ERROR(PostprocessDataEdgesBetweenOutsideCompilations(
g, outside_compilation_attr_name));
TF_RETURN_IF_ERROR(PostprocessControlEdgesBetweenOutsideCompilations(
g, outside_compilation_attr_name));
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/encapsulate_util.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(PerformStaticShapeInferenceBeforeEncapsulationTest, Basic) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output const_0 = ops::Const(s.WithOpName("const_0"), 1, {2});
Output const_1 = ops::Const(s.WithOpName("const_1"), 2, {2});
Output add = ops::Add(s.WithOpName("add"), const_0, const_1);
Output identity = ops::Identity(s.WithOpName("identity"), add);
Graph g(OpRegistry::Global());
TF_CHECK_OK(s.ToGraph(&g));
TF_CHECK_OK(PerformStaticShapeInferenceBeforeEncapsulation(&g));
auto node_index = g.BuildNodeNameIndex();
Node *add_node = node_index["add"];
std::vector<PartialTensorShape> output_shapes;
TF_CHECK_OK(GetNodeAttr(add_node->attrs(), kXlaInferredShapesAttrName,
&output_shapes));
EXPECT_EQ(output_shapes.size(), 1);
TensorShapeProto shape_proto;
output_shapes[0].AsProto(&shape_proto);
EXPECT_EQ(shape_proto.dim_size(), 1);
EXPECT_EQ(shape_proto.dim(0).size(), 2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/encapsulate_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/encapsulate_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8e50f42a-d4e9-4044-b05a-302eaf7a972f | cpp | tensorflow/tensorflow | codegen | tensorflow/compiler/aot/codegen.cc | tensorflow/compiler/aot/codegen_test.cc | #include "tensorflow/compiler/aot/codegen.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "absl/strings/substitute.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/aot/embedded_protocol_buffers.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/cpu_function_runtime.h"
#include "xla/service/compiler.h"
#include "xla/service/cpu/buffer_info_util.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace tfcompile {
namespace {
using BufferInfo = xla::cpu_function_runtime::BufferInfo;
bool IsAlpha(char c) {
return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
}
bool IsAlphaNum(char c) { return IsAlpha(c) || (c >= '0' && c <= '9'); }
Status XLATypeToCpp(xla::PrimitiveType type, string* str) {
switch (type) {
case xla::PRED:
*str = "bool";
break;
case xla::S8:
*str = "tensorflow::int8";
break;
case xla::S16:
*str = "tensorflow::int16";
break;
case xla::S32:
*str = "tensorflow::int32";
break;
case xla::S64:
*str = "int64_t";
break;
case xla::U8:
*str = "tensorflow::uint8";
break;
case xla::U16:
*str = "tensorflow::uint16";
break;
case xla::U32:
*str = "tensorflow::uint32";
break;
case xla::U64:
*str = "tensorflow::uint64";
break;
case xla::F32:
*str = "float";
break;
case xla::F64:
*str = "double";
break;
default:
return errors::Unimplemented("XLA type ", xla::PrimitiveType_Name(type),
" has no equivalent in C++");
}
return absl::OkStatus();
}
size_t TotalBufferBytes(const std::vector<BufferInfo>& buffer_infos) {
return std::accumulate(buffer_infos.begin(), buffer_infos.end(), size_t{0},
[](size_t size, const BufferInfo& buffer_info) {
return size + buffer_info.size();
});
}
std::vector<BufferInfo> ExtractEntryParamBufferInfos(
const std::vector<BufferInfo>& buffer_infos) {
std::vector<BufferInfo> result;
std::copy_if(buffer_infos.begin(), buffer_infos.end(),
std::back_inserter(result), [](const BufferInfo& buffer_info) {
return buffer_info.is_entry_parameter();
});
return result;
}
std::vector<BufferInfo> ExtractTempBufferInfos(
const std::vector<BufferInfo>& buffer_infos) {
std::vector<BufferInfo> result;
std::copy_if(buffer_infos.begin(), buffer_infos.end(),
std::back_inserter(result), [](const BufferInfo& buffer_info) {
return buffer_info.is_temp_buffer();
});
return result;
}
Status AddRewritesForShape(int i, const xla::Shape& shape,
std::vector<std::pair<string, string>>* rewrites) {
string type;
TF_RETURN_IF_ERROR(XLATypeToCpp(shape.element_type(), &type));
std::vector<string> dim_vars;
string dim_sizes, indices;
int count = 1;
if (shape.rank() == 0 ||
(shape.dimensions_size() == 1 && shape.dimensions(0) == 1)) {
dim_sizes = "[1]";
indices = "[0]";
} else {
for (int dim = 0; dim < shape.dimensions_size(); ++dim) {
dim_vars.push_back(absl::StrCat("size_t dim", dim));
dim_sizes += absl::StrCat("[", shape.dimensions(dim), "]");
indices += absl::StrCat("[dim", dim, "]");
count *= shape.dimensions(dim);
}
}
rewrites->push_back({"{{I}}", absl::StrCat(i)});
rewrites->push_back({"{{TYPE}}", type});
rewrites->push_back({"{{DIM_VARS}}", absl::StrJoin(dim_vars, ", ")});
rewrites->push_back({"{{DIM_SIZES}}", dim_sizes});
rewrites->push_back({"{{INDICES}}", indices});
rewrites->push_back({"{{COUNT}}", absl::StrCat(count)});
return absl::OkStatus();
}
string RewriteWithName(const string& name, string code,
const std::vector<std::pair<string, string>>& rewrites) {
absl::StrReplaceAll(rewrites, &code);
absl::StrReplaceAll({{"{{NAME}}", name}}, &code);
return code;
}
Status GenArgMethods(const tf2xla::Config& config,
const xla::ProgramShapeProto& ps,
const CompileResult& compile_result, string* methods) {
const int num_args = ps.parameters_size();
if (config.feed_size() + config.variable_size() < num_args) {
return errors::InvalidArgument(
"mismatch between feed_size(", config.feed_size(), ")+variable_size(",
config.variable_size(), ") and num_args(", num_args, ")");
}
for (int i = 0; i < config.feed_size(); ++i) {
std::vector<std::pair<string, string>> rewrites;
TF_RETURN_IF_ERROR(
AddRewritesForShape(i, xla::Shape(ps.parameters(i)), &rewrites));
const string code = R"(
void set_arg{{NAME}}_data(const void* data) {
set_arg_data({{I}}, data);
}
{{TYPE}}* arg{{NAME}}_data() {
return static_cast<{{TYPE}}*>(arg_data({{I}}));
}
{{TYPE}}& arg{{NAME}}({{DIM_VARS}}) {
return (*static_cast<{{TYPE}}(*){{DIM_SIZES}}>(
arg_data({{I}}))){{INDICES}};
}
const {{TYPE}}* arg{{NAME}}_data() const {
return static_cast<const {{TYPE}}*>(arg_data({{I}}));
}
const {{TYPE}}& arg{{NAME}}({{DIM_VARS}}) const {
return (*static_cast<const {{TYPE}}(*){{DIM_SIZES}}>(
arg_data({{I}}))){{INDICES}};
}
int arg{{NAME}}_size() const {
return {{COUNT}} * sizeof({{TYPE}});
}
int arg{{NAME}}_count() const {
return {{COUNT}};
}
)";
*methods += RewriteWithName(absl::StrCat(i), code, rewrites);
if (!config.feed(i).name().empty()) {
*methods += RewriteWithName("_" + config.feed(i).name(), code, rewrites);
}
}
return absl::OkStatus();
}
Status GenResultMethods(const tf2xla::Config& config,
const xla::ProgramShapeProto& ps, string* methods) {
if (ps.result().element_type() != xla::TUPLE) {
return errors::Internal("codegen requires the XLA result to be a tuple");
}
size_t num_results = ps.result().tuple_shapes_size();
int readonly_variables = absl::c_count_if(
config.variable(),
[](const tf2xla::Variable& var) { return var.readonly(); });
const int actual_num_results =
config.fetch_size() + config.variable_size() - readonly_variables;
if (actual_num_results != num_results) {
return errors::InvalidArgument("mismatch between fetch_size(",
config.fetch_size(), ")+variable_size(",
config.variable_size(), ") and tuple_size(",
ps.result().tuple_shapes_size(), ")");
}
for (int i = 0; i < config.fetch_size(); ++i) {
std::vector<std::pair<string, string>> rewrites;
TF_RETURN_IF_ERROR(AddRewritesForShape(
i, xla::Shape(ps.result().tuple_shapes(i)), &rewrites));
string code = R"(
{{TYPE}}* result{{NAME}}_data() {
return static_cast<{{TYPE}}*>(result_data({{I}}));
}
{{TYPE}}& result{{NAME}}({{DIM_VARS}}) {
return (*static_cast<{{TYPE}}(*){{DIM_SIZES}}>(
result_data({{I}}))){{INDICES}};
}
const {{TYPE}}* result{{NAME}}_data() const {
return static_cast<const {{TYPE}}*>(result_data({{I}}));
}
const {{TYPE}}& result{{NAME}}({{DIM_VARS}}) const {
return (*static_cast<const {{TYPE}}(*){{DIM_SIZES}}>(
result_data({{I}}))){{INDICES}};
}
int result{{NAME}}_size() const {
return {{COUNT}} * sizeof({{TYPE}});
}
int result{{NAME}}_count() const {
return {{COUNT}};
}
)";
*methods += RewriteWithName(absl::StrCat(i), code, rewrites);
if (!config.fetch(i).name().empty()) {
*methods += RewriteWithName("_" + config.fetch(i).name(), code, rewrites);
}
}
return absl::OkStatus();
}
Status GenVariableMethods(const tf2xla::Config& config,
const xla::ProgramShapeProto& ps, string* methods) {
const int num_args = ps.parameters_size();
for (int i = config.feed_size(); i < num_args; ++i) {
std::vector<std::pair<string, string>> rewrites;
TF_RETURN_IF_ERROR(
AddRewritesForShape(i, xla::Shape(ps.parameters(i)), &rewrites));
const string code = R"(
void set_var_{{NAME}}_data({{MAYBE_CONST}}{{TYPE}}* data) {
set_arg_data({{I}}, data);
}
{{MAYBE_CONST}}{{TYPE}}* var_{{NAME}}_data() {
return static_cast<{{MAYBE_CONST}}{{TYPE}}*>(arg_data({{I}}));
}
{{MAYBE_CONST}}{{TYPE}}& var_{{NAME}}({{DIM_VARS}}) {
return (*static_cast<{{MAYBE_CONST}}{{TYPE}}(*){{DIM_SIZES}}>(
arg_data({{I}}))){{INDICES}};
}
const {{TYPE}}* var_{{NAME}}_data() const {
return static_cast<const {{TYPE}}*>(arg_data({{I}}));
}
const {{TYPE}}& var_{{NAME}}({{DIM_VARS}}) const {
return (*static_cast<const {{TYPE}}(*){{DIM_SIZES}}>(
arg_data({{I}}))){{INDICES}};
}
int var_{{NAME}}_size() const {
return {{COUNT}} * sizeof({{TYPE}});
}
int var_{{NAME}}_count() const {
return {{COUNT}};
}
)";
const tf2xla::Variable& var = config.variable(i - config.feed_size());
rewrites.emplace_back("{{MAYBE_CONST}}", var.readonly() ? "const " : "");
*methods += RewriteWithName(
var.name().empty() ? var.node_name() : var.name(), code, rewrites);
}
return absl::OkStatus();
}
Status GenArgShapeInfos(const xla::ProgramShapeProto& ps, string* infos) {
for (int i = 0; i < ps.parameters_size(); ++i) {
const xla::ShapeProto& shape = ps.parameters(i);
if (shape.element_type() == xla::TUPLE) {
return absl::InternalError(
absl::StrCat("parameter ", i,
": codegen requires XLA parameters to "
"be non-tuples."));
}
*infos += absl::Substitute(R"( static constexpr int32_t kArg$0Shapes[] = {
$1
};
)",
i,
shape.dimensions_size() > 0
? absl::StrJoin(shape.dimensions(), ", ")
: "-1");
}
*infos += R"( static const ShapeInfo* ArgShapeInfos() {
static constexpr ShapeInfo kArgShapeInfoTable[kNumArgs] = {
)";
for (int i = 0; i < ps.parameters_size(); ++i) {
const xla::ShapeProto& shape = ps.parameters(i);
*infos +=
absl::Substitute("{ kArg$0Shapes, $1 },\n", i, shape.dimensions_size());
}
*infos += R"( };
return kArgShapeInfoTable;
})";
return absl::OkStatus();
}
Status GenResultShapeInfos(const xla::ProgramShapeProto& ps, string* infos) {
if (ps.result().element_type() != xla::TUPLE) {
return absl::InternalError("codegen requires the XLA result to be a tuple");
}
for (int i = 0; i < ps.result().tuple_shapes_size(); ++i) {
const xla::ShapeProto& shape = ps.result().tuple_shapes(i);
*infos += absl::Substitute(
R"( static constexpr int32_t kResult$0Shapes[] = {
$1
};
)",
i,
shape.dimensions_size() > 0 ? absl::StrJoin(shape.dimensions(), ", ")
: "-1");
}
*infos += R"( static const ShapeInfo* ResultShapeInfos() {
static constexpr ShapeInfo kResultShapeInfoTable[kNumResults] = {
)";
for (int i = 0; i < ps.result().tuple_shapes_size(); ++i) {
const xla::ShapeProto& shape = ps.result().tuple_shapes(i);
*infos += absl::Substitute("{ kResult$0Shapes, $1 },\n", i,
shape.dimensions_size());
}
*infos += R"( };
return kResultShapeInfoTable;
})";
return absl::OkStatus();
}
template <typename T>
string GenNameToIndexCode(const T& entries, bool generate) {
if (!generate) {
return "{\n return nullptr;\n }";
}
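  // Drop trailing entries with empty names, then emit the remaining names as
  // a nullptr-terminated array.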
int end = entries.size();
for (int i = entries.size() - 1; i >= 0; --i) {
if (!entries[i].name().empty()) {
break;
}
end = i;
}
string code = "{\n static const char* kNames[] = {";
for (int i = 0; i < end; ++i) {
if (i > 0) {
code += ", ";
}
code += "\"";
code += entries[i].name();
code += "\"";
}
if (end > 0) {
code += ", ";
}
code += "nullptr};\n return kNames;\n }";
return code;
}
Status ValidateFeedFetchCppNames(const tf2xla::Config& config) {
for (const tf2xla::Feed& feed : config.feed()) {
if (!feed.name().empty()) {
TF_RETURN_IF_ERROR(ValidateCppIdent(feed.name(), "feed name"));
}
}
for (const tf2xla::Fetch& fetch : config.fetch()) {
if (!fetch.name().empty()) {
TF_RETURN_IF_ERROR(ValidateCppIdent(fetch.name(), "fetch name"));
}
}
for (const tf2xla::Variable& variable : config.variable()) {
if (!variable.name().empty()) {
TF_RETURN_IF_ERROR(ValidateCppIdent(variable.name(), "variable name"));
} else {
TF_RETURN_IF_ERROR(
ValidateCppIdent(variable.node_name(), "variable name"));
}
}
return absl::OkStatus();
}
std::vector<string> BufferInfosToCppExpression(
const std::vector<BufferInfo>& buffer_infos) {
std::vector<string> buffer_infos_as_strings;
std::transform(buffer_infos.begin(), buffer_infos.end(),
std::back_inserter(buffer_infos_as_strings),
[](const BufferInfo& buffer_info) {
xla::cpu_function_runtime::EncodedBufferInfo encoded =
buffer_info.Encode();
auto param_to_str = [](uint32_t param) -> std::string {
return param == ~0U ? "~0U" : absl::StrCat(param, "U");
};
return absl::StrCat(
"::xla::cpu_function_runtime::BufferInfo("
"::xla::cpu_function_runtime::EncodedBufferInfo{",
encoded.packed_kind_and_size, "ULL, ",
param_to_str(encoded.entry_param_number), ", ",
param_to_str(encoded.result_param_number), "})");
});
return buffer_infos_as_strings;
}
Status CheckEqual(size_t a, size_t b, absl::string_view error_msg) {
if (a != b) {
return absl::InternalError(
absl::StrCat(error_msg, ". Expected ", a, ", got ", b, "."));
}
return absl::OkStatus();
}
}
Status GenerateHeader(const CodegenOpts& opts, const tf2xla::Config& config,
const CompileResult& compile_result,
const MetadataResult& metadata_result, string* header) {
TF_RETURN_IF_ERROR(ValidateConfig(config));
TF_RETURN_IF_ERROR(ValidateFeedFetchCppNames(config));
const int64_t result_index = compile_result.aot->result_buffer_index();
const std::vector<BufferInfo>& buffer_infos =
compile_result.aot->buffer_infos();
const std::vector<int32> arg_index_table =
::xla::cpu::CreateArgIndexTableFromBufferInfos(buffer_infos);
const std::vector<int32> result_index_table =
::xla::cpu::CreateResultIndexTableFromBufferInfos(buffer_infos);
std::vector<string> buffer_infos_as_strings =
BufferInfosToCppExpression(buffer_infos);
const int64_t buffer_infos_size = buffer_infos.size();
if (result_index < 0 || result_index >= buffer_infos_size) {
return errors::InvalidArgument("result index: ", result_index,
" is outside the range of temp sizes: [0,",
buffer_infos.size(), ")");
}
std::vector<BufferInfo> buffer_infos_for_args =
ExtractEntryParamBufferInfos(buffer_infos);
std::vector<BufferInfo> buffer_infos_for_temps =
ExtractTempBufferInfos(buffer_infos);
const xla::ProgramShapeProto& ps = compile_result.program_shape;
string methods_arg, methods_result, methods_variable;
TF_RETURN_IF_ERROR(GenArgMethods(config, ps, compile_result, &methods_arg));
TF_RETURN_IF_ERROR(GenResultMethods(config, ps, &methods_result));
TF_RETURN_IF_ERROR(GenVariableMethods(config, ps, &methods_variable));
string arg_shape_infos, result_shape_infos;
TF_RETURN_IF_ERROR(GenArgShapeInfos(ps, &arg_shape_infos));
TF_RETURN_IF_ERROR(
CheckEqual(ps.parameters_size(), arg_index_table.size(),
"Arg number mismatch, proto vs. arg_index_table"));
TF_RETURN_IF_ERROR(GenResultShapeInfos(ps, &result_shape_infos));
TF_RETURN_IF_ERROR(
CheckEqual(ps.result().tuple_shapes_size(), result_index_table.size(),
"Result number mismatch, proto vs. result_index_table"));
const size_t arg_bytes_aligned =
xla::cpu_function_runtime::AlignedBufferBytes(
buffer_infos_for_args.data(), buffer_infos_for_args.size(),
true);
const size_t arg_bytes_total = TotalBufferBytes(buffer_infos_for_args);
const size_t temp_bytes_aligned =
xla::cpu_function_runtime::AlignedBufferBytes(
buffer_infos_for_temps.data(), buffer_infos_for_temps.size(),
true);
const size_t temp_bytes_total = TotalBufferBytes(buffer_infos_for_temps);
string ns_start;
for (const string& n : opts.namespaces) {
ns_start += absl::StrCat("namespace ", n, " {\n");
}
ns_start += "\n";
string ns_end("\n");
for (int i = opts.namespaces.size() - 1; i >= 0; --i) {
const string& n = opts.namespaces[i];
    ns_end += absl::StrCat("}  // end namespace ", n, "\n");
  }
const string arg_names_code =
GenNameToIndexCode(config.feed(), opts.gen_name_to_index);
auto variable_copy = config.variable();
for (auto& var : variable_copy) {
if (var.name().empty()) {
var.set_name(var.node_name());
}
}
const string variable_names_code =
GenNameToIndexCode(variable_copy, opts.gen_name_to_index);
const string result_names_code =
GenNameToIndexCode(config.fetch(), opts.gen_name_to_index);
const string include_xla_data_proto =
opts.gen_program_shape
? R"(#include "xla/xla_data.pb.h")"
: "";
const string include_hlo_profile_printer_data_proto =
opts.gen_hlo_profile_printer_data
? R"(#include "xla/service/hlo_profile_printer_data.pb.h")"
: "";
const string assign_profile_counters_size =
opts.gen_hlo_profile_printer_data
? "set_static_data_profile_counters_size(data, "
"get_static_data_hlo_profile_printer_data(data)->"
"profile_counters_size());"
: "";
*header =
R"(
#ifndef TFCOMPILE_GENERATED_{{ENTRY}}_H_
#define TFCOMPILE_GENERATED_{{ENTRY}}_H_
{{INCLUDE_XLA_DATA_PROTO}}
{{INCLUDE_HLO_PROFILE_PRINTER_DATA_PROTO}}
#include "tensorflow/compiler/tf2xla/xla_compiled_cpu_function.h"
#include "tensorflow/core/platform/types.h"
namespace Eigen { struct ThreadPoolDevice; }
namespace xla { class ExecutableRunOptions; }
extern "C" void {{ENTRY}}(
void* result, const ::xla::ExecutableRunOptions* run_options,
const void** args, void** temps, XlaCustomCallStatus* status,
int64_t* profile_counters);
{{DECLS_FROM_OBJ_FILE}}
{{NS_START}}
class {{CLASS}} final : public tensorflow::XlaCompiledCpuFunction {
public:
static constexpr size_t kNumArgs = {{ARG_NUM}};
static constexpr size_t kNumResults = {{RESULT_NUM}};
static constexpr size_t kNumVariables = {{VARIABLE_NUM}};
static const ::int64_t ArgSize(::tensorflow::int32 index) {
return BufferInfos()[ArgIndexToBufferIndex()[index]].size();
}
static const tensorflow::XlaCompiledCpuFunction::StaticData& StaticData() {
static XlaCompiledCpuFunction::StaticData* kStaticData = [](){
XlaCompiledCpuFunction::StaticData* data =
new XlaCompiledCpuFunction::StaticData;
set_static_data_raw_function(data, {{ENTRY}});
set_static_data_buffer_infos(data, BufferInfos());
set_static_data_num_buffers(data, kNumBuffers);
set_static_data_result_index_table(data, ResultIndexToBufferIndex());
set_static_data_num_results(data, kNumResults);
set_static_data_arg_index_table(data, ArgIndexToBufferIndex());
set_static_data_num_args(data, kNumArgs);
set_static_data_num_variables(data, kNumVariables);
set_static_data_result_index(data, kResultIndex);
set_static_data_arg_shape_infos(data, ArgShapeInfos());
set_static_data_result_shape_infos(data, ResultShapeInfos());
set_static_data_arg_names(data, StaticArgNames());
set_static_data_variable_names(data, StaticVariableNames());
set_static_data_result_names(data, StaticResultNames());
set_static_data_program_shape(data, StaticProgramShape());
set_static_data_hlo_profile_printer_data(
data, StaticHloProfilePrinterData());
{{ASSIGN_PROFILE_COUNTERS_SIZE}}
return data;
}();
return *kStaticData;
}
{{CLASS}}(AllocMode alloc_mode =
AllocMode::ARGS_VARIABLES_RESULTS_PROFILES_AND_TEMPS)
: XlaCompiledCpuFunction(StaticData(), alloc_mode) {}
{{CLASS}}(const {{CLASS}}&) = delete;
{{CLASS}}& operator=(const {{CLASS}}&) = delete;
{{METHODS_ARG}}
{{METHODS_RESULT}}
{{METHODS_VARIABLE}}
private:
static constexpr size_t kNumBuffers = {{NUM_BUFFERS}};
static const ::xla::cpu_function_runtime::BufferInfo* BufferInfos() {
static const ::xla::cpu_function_runtime::BufferInfo
kBufferInfos[kNumBuffers] = {
{{BUFFER_INFOS_AS_STRING}}
};
return kBufferInfos;
}
static const ::tensorflow::int32* ResultIndexToBufferIndex() {
static constexpr ::tensorflow::int32 kResultIndexToBufferIndex[kNumResults] = {
{{RESULT_INDEX_TABLE}}
};
return kResultIndexToBufferIndex;
}
static const ::tensorflow::int32* ArgIndexToBufferIndex() {
static constexpr ::tensorflow::int32 kArgIndexToBufferIndex[kNumArgs] = {
{{ARG_INDEX_TABLE}}
};
return kArgIndexToBufferIndex;
}
static constexpr size_t kResultIndex = {{RESULT_INDEX}};
{{ARG_SHAPE_INFOS}};
{{RESULT_SHAPE_INFOS}};
static const char** StaticArgNames() {{ARG_NAMES_CODE}}
static const char** StaticVariableNames() {{VARIABLE_NAMES_CODE}}
static const char** StaticResultNames() {{RESULT_NAMES_CODE}}
static const ::xla::ProgramShapeProto* StaticProgramShape() {
static const ::xla::ProgramShapeProto* kShape = {{PROGRAM_SHAPE_SHIM_EXPRESSION}};
return kShape;
}
static const ::xla::HloProfilePrinterData* StaticHloProfilePrinterData() {
static const ::xla::HloProfilePrinterData* kHloProfilePrinterData =
{{HLO_PROFILE_PRINTER_DATA_SHIM_EXPRESSION}};
return kHloProfilePrinterData;
}
};
{{NS_END}}
#endif
)";
const std::vector<std::pair<string, string>> rewrites = {
{"{{ARG_BYTES_ALIGNED}}", absl::StrCat(arg_bytes_aligned)},
{"{{ARG_BYTES_TOTAL}}", absl::StrCat(arg_bytes_total)},
{"{{ARG_NAMES_CODE}}", arg_names_code},
{"{{ARG_NUM}}", absl::StrCat(arg_index_table.size())},
{"{{ARG_SHAPE_INFOS}}", arg_shape_infos},
{"{{VARIABLE_NUM}}", absl::StrCat(config.variable_size())},
{"{{ARG_INDEX_TABLE}}", absl::StrJoin(arg_index_table, ", ")},
{"{{RESULT_NUM}}", absl::StrCat(result_index_table.size())},
{"{{RESULT_INDEX_TABLE}}", absl::StrJoin(result_index_table, ", ")},
{"{{ASSIGN_PROFILE_COUNTERS_SIZE}}", assign_profile_counters_size},
{"{{CLASS}}", opts.class_name},
{"{{DECLS_FROM_OBJ_FILE}}",
absl::StrJoin(metadata_result.header_variable_decls, "\n")},
{"{{ENTRY}}", compile_result.entry_point},
{"{{HLO_PROFILE_PRINTER_DATA_SHIM_EXPRESSION}}",
metadata_result.hlo_profile_printer_data_access_shim},
{"{{INCLUDE_XLA_DATA_PROTO}}", include_xla_data_proto},
{"{{INCLUDE_HLO_PROFILE_PRINTER_DATA_PROTO}}",
include_hlo_profile_printer_data_proto},
{"{{METHODS_ARG}}\n", methods_arg},
{"{{METHODS_RESULT}}\n", methods_result},
{"{{METHODS_VARIABLE}}\n", methods_variable},
{"{{NS_END}}\n", ns_end},
{"{{NS_START}}\n", ns_start},
{"{{PROGRAM_SHAPE}}", xla::ShapeUtil::HumanString(xla::ProgramShape(ps))},
{"{{PROGRAM_SHAPE_SHIM_EXPRESSION}}",
metadata_result.program_shape_access_shim},
{"{{VARIABLE_NAMES_CODE}}", variable_names_code},
{"{{RESULT_INDEX}}", absl::StrCat(result_index)},
{"{{RESULT_NAMES_CODE}}", result_names_code},
{"{{RESULT_SHAPE_INFOS}}", result_shape_infos},
{"{{TEMP_BYTES_ALIGNED}}", absl::StrCat(temp_bytes_aligned)},
{"{{TEMP_BYTES_TOTAL}}", absl::StrCat(temp_bytes_total)},
{"{{NUM_BUFFERS}}", absl::StrCat(buffer_infos.size())},
{"{{BUFFER_INFOS_AS_STRING}}",
absl::StrJoin(buffer_infos_as_strings, ",\n")}};
absl::StrReplaceAll(rewrites, header);
return absl::OkStatus();
}
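// The {{PLACEHOLDER}} tokens in the header template above are substituted via
// the `rewrites` table by absl::StrReplaceAll. Illustrative values only
// (mirroring the golden test later in this row): {{CLASS}} -> "MyClass",
// {{ENTRY}} -> "entry_point", {{NS_START}} -> "namespace foo {\nnamespace bar {\n".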
static string CreateUniqueIdentifier(const CodegenOpts& opts,
absl::string_view suffix) {
string result = "__tfcompile";
for (const string& n : opts.namespaces) {
absl::StrAppend(&result, "_", n);
}
absl::StrAppend(&result, "_", opts.class_name, "_", suffix);
return result;
}
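// Example: with opts.namespaces = {"foo", "bar"} and opts.class_name =
// "MyClass" (the values used in the golden test below), CreateUniqueIdentifier
// produces "__tfcompile_foo_bar_MyClass_ProgramShapeProto" for the
// "ProgramShapeProto" suffix.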
Status GenerateMetadata(const CodegenOpts& opts,
const CompileResult& compile_result,
MetadataResult* metadata_result) {
std::unique_ptr<xla::ProgramShapeProto> program_shape;
if (opts.gen_program_shape) {
program_shape =
std::make_unique<xla::ProgramShapeProto>(compile_result.program_shape);
program_shape->clear_parameter_names();
}
ProtobufToEmbed program_shape_protobuf{
CreateUniqueIdentifier(opts, "ProgramShapeProto"),
"::xla::ProgramShapeProto", program_shape.get()};
ProtobufToEmbed hlo_profile_printer_data_protobuf{
CreateUniqueIdentifier(opts, "HloProfilePrinterData"),
"::xla::HloProfilePrinterData",
compile_result.aot->hlo_profile_printer_data()};
TF_ASSIGN_OR_RETURN(
EmbeddedProtocolBuffers embedded_protobufs,
CreateEmbeddedProtocolBuffers(
opts.target_triple,
{program_shape_protobuf, hlo_profile_printer_data_protobuf}));
metadata_result->program_shape_access_shim =
std::move(embedded_protobufs.cpp_shims[0].expression);
metadata_result->hlo_profile_printer_data_access_shim =
std::move(embedded_protobufs.cpp_shims[1].expression);
metadata_result->header_variable_decls.emplace_back(
std::move(embedded_protobufs.cpp_shims[0].variable_decl));
metadata_result->header_variable_decls.emplace_back(
std::move(embedded_protobufs.cpp_shims[1].variable_decl));
metadata_result->object_file_data =
std::move(embedded_protobufs.object_file_data);
return absl::OkStatus();
}
Status ParseCppClass(const string& cpp_class, string* class_name,
std::vector<string>* namespaces) {
class_name->clear();
namespaces->clear();
if (cpp_class.empty()) {
return errors::InvalidArgument("empty cpp_class: " + cpp_class);
}
std::vector<string> parts = absl::StrSplit(cpp_class, "::");
if (parts.front().empty()) {
parts.erase(parts.begin());
}
for (int i = 0, end = parts.size(); i < end; ++i) {
if (i < end - 1) {
TF_RETURN_IF_ERROR(ValidateCppIdent(
parts[i], "in namespace component of cpp_class: " + cpp_class));
namespaces->push_back(parts[i]);
} else {
TF_RETURN_IF_ERROR(ValidateCppIdent(
parts[i], "in class name of cpp_class: " + cpp_class));
*class_name = parts[i];
}
}
return absl::OkStatus();
}
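// Example: ParseCppClass("foo::bar::MyClass", &class_name, &namespaces) sets
// class_name to "MyClass" and namespaces to {"foo", "bar"}; a leading "::" is
// accepted, and every component must pass ValidateCppIdent below.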
Status ValidateCppIdent(absl::string_view ident, absl::string_view msg) {
if (ident.empty()) {
return errors::InvalidArgument("empty identifier: ", msg);
}
if (ident[0] != '_' && !IsAlpha(ident[0])) {
return errors::InvalidArgument("illegal leading char: ", msg);
}
for (size_t pos = 1; pos < ident.size(); ++pos) {
if (ident[pos] != '_' && !IsAlphaNum(ident[pos])) {
return errors::InvalidArgument("illegal char: ", msg);
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/compiler/aot/codegen.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/TargetSelect.h"
#include "xla/cpu_function_runtime.h"
#include "xla/shape_util.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tfcompile {
namespace {
using ::xla::cpu_function_runtime::BufferInfo;
void ExpectErrorContains(const Status& status, absl::string_view str) {
EXPECT_NE(absl::OkStatus(), status);
EXPECT_TRUE(absl::StrContains(status.message(), str))
<< "expected error: " << status.message() << " to contain: " << str;
}
TEST(ValidateCppIdent, Simple) {
TF_EXPECT_OK(ValidateCppIdent("a", ""));
TF_EXPECT_OK(ValidateCppIdent("abc", ""));
TF_EXPECT_OK(ValidateCppIdent("_abc", ""));
TF_EXPECT_OK(ValidateCppIdent("_abc123", ""));
string ident;
for (char c = 'a'; c <= 'z'; c++) {
ident.append(1, c);
}
for (char c = 'A'; c <= 'Z'; c++) {
ident.append(1, c);
}
for (char c = '0'; c <= '9'; c++) {
ident.append(1, c);
}
ident += "_";
TF_EXPECT_OK(ValidateCppIdent(ident, ""));
ExpectErrorContains(ValidateCppIdent("", ""), "empty identifier");
ExpectErrorContains(ValidateCppIdent(" ", ""), "illegal leading char");
ExpectErrorContains(ValidateCppIdent("0", ""), "illegal leading char");
ExpectErrorContains(ValidateCppIdent(".", ""), "illegal leading char");
ExpectErrorContains(ValidateCppIdent(":", ""), "illegal leading char");
ExpectErrorContains(ValidateCppIdent("a.", ""), "illegal char");
ExpectErrorContains(ValidateCppIdent("a:", ""), "illegal char");
ExpectErrorContains(ValidateCppIdent("a:", ""), "illegal char");
}
class ParseCppClassTest : public ::testing::Test {
protected:
void ExpectOK(const string& cpp_class, const string& want_class_name,
const std::vector<string>& want_namespaces) {
string class_name;
std::vector<string> namespaces;
TF_EXPECT_OK(ParseCppClass(cpp_class, &class_name, &namespaces));
EXPECT_EQ(class_name, want_class_name);
EXPECT_EQ(namespaces, want_namespaces);
}
void ExpectFail(const string& cpp_class) {
string class_name;
std::vector<string> namespaces;
EXPECT_NE(ParseCppClass(cpp_class, &class_name, &namespaces),
absl::OkStatus())
<< cpp_class;
}
};
TEST_F(ParseCppClassTest, ParseOK) {
ExpectOK("MyClass", "MyClass", {});
ExpectOK("_MyClass", "_MyClass", {});
ExpectOK("a::MyClass", "MyClass", {"a"});
ExpectOK("a::foo::MyClass", "MyClass", {"a", "foo"});
ExpectOK("a::foo::b::MyClass", "MyClass", {"a", "foo", "b"});
ExpectOK("a::foo::b::bar::MyClass", "MyClass", {"a", "foo", "b", "bar"});
ExpectOK("foo::MyClass", "MyClass", {"foo"});
ExpectOK("_foo::MyClass", "MyClass", {"_foo"});
ExpectOK("_foo::_MyClass", "_MyClass", {"_foo"});
ExpectOK("::foo::bar::MyClass", "MyClass", {"foo", "bar"});
ExpectOK("::_foo::MyClass", "MyClass", {"_foo"});
ExpectOK("::_foo::_MyClass", "_MyClass", {"_foo"});
string ident;
for (char c = 'a'; c <= 'z'; c++) {
ident.append(1, c);
}
for (char c = 'A'; c <= 'Z'; c++) {
ident.append(1, c);
}
for (char c = '0'; c <= '9'; c++) {
ident.append(1, c);
}
ident += "_";
ExpectOK(ident, ident, {});
ExpectOK(ident + "::" + ident, ident, {ident});
ExpectOK(ident + "::" + ident + "::" + ident, ident, {ident, ident});
}
TEST_F(ParseCppClassTest, ParseFail) {
ExpectFail("");
ExpectFail("::");
ExpectFail("0");
ExpectFail("a.b");
ExpectFail("a:b");
ExpectFail(":foo::bar");
ExpectFail("good::.bad");
ExpectFail("good:::bad");
ExpectFail("good::bad::");
ExpectFail("good::::bad");
ExpectFail("::::bad");
ExpectFail("good:: bad");
ExpectFail("good::0bad");
}
static void CompareWithGoldenFile(
const string& tensorflow_relative_golden_file_name,
const string& expected_contents, bool ignore_cr) {
string sanitized_expected_contents(expected_contents);
if (ignore_cr) {
sanitized_expected_contents.erase(
std::remove(sanitized_expected_contents.begin(),
sanitized_expected_contents.end(), '\r'),
sanitized_expected_contents.end());
}
const bool update_golden = false;
string golden_file_name =
GetDataDependencyFilepath(tensorflow_relative_golden_file_name);
if (update_golden) {
TF_EXPECT_OK(
WriteStringToFile(Env::Default(), golden_file_name, expected_contents));
}
string golden_file_contents;
TF_ASSERT_OK(ReadFileToString(Env::Default(), golden_file_name,
&golden_file_contents));
if (ignore_cr) {
golden_file_contents.erase(std::remove(golden_file_contents.begin(),
golden_file_contents.end(), '\r'),
golden_file_contents.end());
}
EXPECT_EQ(golden_file_contents, expected_contents);
}
#if TF_LLVM_X86_AVAILABLE
TEST(CodegenTest, Golden) {
LLVMInitializeX86Target();
LLVMInitializeX86TargetInfo();
LLVMInitializeX86TargetMC();
LLVMInitializeX86AsmPrinter();
CodegenOpts opts;
opts.class_name = "MyClass";
opts.target_triple = "x86_64-pc-linux";
opts.namespaces = {"foo", "bar"};
opts.gen_name_to_index = true;
opts.gen_program_shape = true;
tf2xla::Config config;
tf2xla::Feed* feed = config.add_feed();
feed->mutable_id()->set_node_name("feed0");
feed->set_name("myfeed");
feed = config.add_feed();
feed->mutable_id()->set_node_name("feed1");
tf2xla::Fetch* fetch = config.add_fetch();
fetch->mutable_id()->set_node_name("fetch0");
fetch->set_name("myfetch");
tf2xla::Variable* variable = config.add_variable();
variable->set_node_name("myvar_readonly");
variable->mutable_shape()->add_dim()->set_size(1);
variable->set_type(DT_FLOAT);
variable->set_readonly(true);
tf2xla::Variable* variable2 = config.add_variable();
variable2->set_node_name("myvar");
variable2->mutable_shape()->add_dim()->set_size(1);
variable2->set_type(DT_FLOAT);
tf2xla::Variable* variable3 = config.add_variable();
variable3->set_node_name("my/var");
variable3->set_name("myvar2");
variable3->mutable_shape()->add_dim()->set_size(5);
variable3->set_type(DT_INT32);
CompileResult compile_result;
compile_result.aot.reset(new xla::cpu::CpuAotCompilationResult(
{},
{BufferInfo::MakeTempBuffer(3 * 8),
BufferInfo::MakeEntryParameter(8, 0),
BufferInfo::MakeTempBuffer(1),
BufferInfo::MakeEntryParameter(96, 1),
BufferInfo::MakeTempBuffer(1),
BufferInfo::MakeEntryParameter(96, 2),
BufferInfo::MakeTempBuffer(1),
BufferInfo::MakeEntryParameter(96, 3),
BufferInfo::MakeResultParameter(5 * 6 * 4,
0),
BufferInfo::MakeEntryParameter(96, 4),
BufferInfo::MakeResultParameter(1 * 4,
1),
BufferInfo::MakeResultParameter(5 * 4,
2)},
0, nullptr, {}));
compile_result.program_shape =
xla::ShapeUtil::MakeProgramShape(
{
xla::ShapeUtil::MakeShape(xla::F32, {1, 2}),
xla::ShapeUtil::MakeShape(xla::S64, {3, 4}),
xla::ShapeUtil::MakeShape(xla::F32, {1}),
xla::ShapeUtil::MakeShape(xla::F32, {1}),
xla::ShapeUtil::MakeShape(xla::S32, {5}),
},
xla::ShapeUtil::MakeTupleShape({
xla::ShapeUtil::MakeShape(xla::U32, {5, 6}),
xla::ShapeUtil::MakeShape(xla::F32, {1}),
xla::ShapeUtil::MakeShape(xla::S32, {5}),
}))
.ToProto();
compile_result.entry_point = "entry_point";
compile_result.pointer_size = 8;
MetadataResult metadata_result;
TF_ASSERT_OK(GenerateMetadata(opts, compile_result, &metadata_result));
CompareWithGoldenFile("tensorflow/compiler/aot/codegen_test_o.golden",
metadata_result.object_file_data, false);
string header;
TF_ASSERT_OK(
GenerateHeader(opts, config, compile_result, metadata_result, &header));
CompareWithGoldenFile("tensorflow/compiler/aot/codegen_test_h.golden", header,
true);
}
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/aot/codegen.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/aot/codegen_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
75b5b842-ea24-4b95-a9a0-6dbf9817814e | cpp | tensorflow/tensorflow | benchmark | tensorflow/compiler/aot/benchmark.cc | tensorflow/compiler/aot/benchmark_test.cc | #include "tensorflow/compiler/aot/benchmark.h"
#include <sys/time.h>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace tfcompile {
namespace benchmark {
static uint64 NowMicros() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<uint64>(tv.tv_sec) * 1000000 + tv.tv_usec;
}
void DumpStatsToStdout(const Stats& stats) {
std::vector<int64_t> sorted_us(stats.per_iter_us);
std::sort(sorted_us.begin(), sorted_us.end());
const size_t count_us = sorted_us.size();
double sum_us = 0;
size_t count_us_trimmed = 0;
double sum_us_trimmed = 0;
size_t count_us_best = 0;
double sum_us_best = 0;
static constexpr float trim_ratio = 0.25;
static constexpr float best_ratio = 0.1;
const size_t count_trimmed = count_us * trim_ratio;
const size_t count_best = count_us * best_ratio;
for (size_t i = 0; i < sorted_us.size(); ++i) {
const int64_t us = sorted_us[i];
sum_us += us;
if (i >= count_trimmed && i < count_us - count_trimmed) {
sum_us_trimmed += us;
++count_us_trimmed;
}
if (i < count_best) {
sum_us_best += us;
++count_us_best;
}
}
const int kBufSize = 1000;
char buf[kBufSize];
snprintf(buf, kBufSize, "Mean with %2.0f%% trimmed:", trim_ratio * 100);
std::string label_trimmed(buf);
snprintf(buf, kBufSize, "Mean of %2.0f%% best:", best_ratio * 100);
std::string label_best(buf);
std::vector<std::pair<std::string, double>> groups = {
{"Best:", sorted_us.front()},
{"Worst:", sorted_us.back()},
{"Median:", sorted_us[count_us / 2]},
{"Mean:", sum_us / count_us},
{std::move(label_trimmed), sum_us_trimmed / count_us_trimmed},
{std::move(label_best), sum_us_best / count_us_best},
};
int max_label_size = 0;
double max_us = 0;
for (const auto& g : groups) {
if (g.first.size() > max_label_size) {
max_label_size = g.first.size();
}
if (g.second > max_us) {
max_us = g.second;
}
}
int max_digits = 1;
while (max_us >= 10.0) {
max_us /= 10.0;
++max_digits;
}
printf("Benchmark ran %zu iterations over %lld us\n", count_us,
static_cast<long long>(stats.total_us));
for (const auto& g : groups) {
printf(" %-*s %*.3f us\n", max_label_size, g.first.c_str(), max_digits + 4,
g.second);
}
}
void Benchmark(const Options& options, const BenchmarkFn& fn, Stats* stats) {
const int64_t max_us = (options.max_micros <= 0 && options.max_iters <= 0)
? Options::kDefaultMicros
: options.max_micros;
printf("Running benchmark for %lld us\n", static_cast<long long>(max_us));
const int64_t start_us = NowMicros();
int64_t iters = 0;
while (true) {
const int64_t iter_start_us = NowMicros();
fn();
const int64_t end_us = NowMicros();
stats->per_iter_us.push_back(end_us - iter_start_us);
const int64_t total_us = end_us - start_us;
++iters;
if ((max_us > 0 && total_us >= max_us) ||
(options.max_iters > 0 && iters >= options.max_iters)) {
stats->total_us = total_us;
break;
}
}
}
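// Minimal usage sketch (mirroring benchmark_test.cc in this row); `computation`
// stands in for an instance of any tfcompile-generated class:
//   Options options;
//   options.max_iters = 5;
//   Stats stats;
//   Benchmark(options, [&] { computation.Run(); }, &stats);
//   DumpStatsToStdout(stats);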
}
}
} | #include "tensorflow/compiler/aot/benchmark.h"
#include "tensorflow/compiler/aot/test_graph_tfadd.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tfcompile {
namespace benchmark {
namespace {
TEST(Benchmark, Benchmark) {
AddComp add;
Options options;
options.max_iters = 1;
Stats stats1;
Benchmark(options, [&] { add.Run(); }, &stats1);
EXPECT_EQ(stats1.per_iter_us.size(), 1);
options.max_iters = 5;
Stats stats5;
Benchmark(options, [&] { add.Run(); }, &stats5);
EXPECT_EQ(stats5.per_iter_us.size(), 5);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/aot/benchmark.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/aot/benchmark_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d3090d1e-5324-455c-99cb-9831f60b4f72 | cpp | tensorflow/tensorflow | array4d | third_party/xla/xla/array4d.h | third_party/xla/xla/array4d_test.cc | #ifndef XLA_ARRAY4D_H_
#define XLA_ARRAY4D_H_
#include <algorithm>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <numeric>
#include <random>
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
template <typename T>
class Array4D : public Array<T> {
public:
Array4D() : Array<T>(std::vector<int64_t>{0, 0, 0, 0}) {}
Array4D(int64_t planes, int64_t depth, int64_t height, int64_t width)
: Array<T>(std::vector<int64_t>{planes, depth, height, width}) {}
Array4D(int64_t planes, int64_t depth, int64_t height, int64_t width, T value)
: Array<T>(std::vector<int64_t>{planes, depth, height, width}, value) {}
template <typename Container = std::initializer_list<T>>
Array4D(int64_t planes, int64_t depth, int64_t height, int64_t width,
const Container& values)
: Array4D(planes, depth, height, width) {
this->SetValues(values);
}
Array4D(std::initializer_list<std::initializer_list<
std::initializer_list<std::initializer_list<T>>>>
values)
: Array<T>(values) {}
template <typename T2, array_impl::overload_for_float<T, T2> = true>
Array4D(std::initializer_list<std::initializer_list<
std::initializer_list<std::initializer_list<T2>>>>
values)
: Array<T>(values) {}
int64_t n4() const { return this->dim(3); }
int64_t n3() const { return this->dim(2); }
int64_t n2() const { return this->dim(1); }
int64_t n1() const { return this->dim(0); }
int64_t width() const { return this->dim(3); }
int64_t height() const { return this->dim(2); }
int64_t depth() const { return this->dim(1); }
int64_t planes() const { return this->dim(0); }
void FillWithYX(const Array2D<T>& value) {
CHECK_EQ(value.height(), height());
CHECK_EQ(value.width(), width());
for (int64_t plane = 0; plane < planes(); ++plane) {
for (int64_t depth = 0; depth < this->depth(); ++depth) {
for (int64_t height = 0; height < this->height(); ++height) {
for (int64_t width = 0; width < this->width(); ++width) {
(*this)(plane, depth, height, width) = value(height, width);
}
}
}
}
}
void FillWithZY(const Array2D<T>& value) {
CHECK_EQ(value.height(), depth());
CHECK_EQ(value.width(), height());
for (int64_t plane = 0; plane < planes(); ++plane) {
for (int64_t depth = 0; depth < this->depth(); ++depth) {
for (int64_t height = 0; height < this->height(); ++height) {
for (int64_t width = 0; width < this->width(); ++width) {
(*this)(plane, depth, height, width) = value(depth, height);
}
}
}
}
}
void FillWithPZ(const Array2D<T>& value) {
CHECK_EQ(value.height(), planes());
CHECK_EQ(value.width(), depth());
for (int64_t height = 0; height < this->height(); ++height) {
for (int64_t width = 0; width < this->width(); ++width) {
for (int64_t plane = 0; plane < planes(); ++plane) {
for (int64_t depth = 0; depth < this->depth(); ++depth) {
(*this)(plane, depth, height, width) = value(plane, depth);
}
}
}
}
}
void FillWithMinorDimNum() {
LOG(INFO) << "width: " << this->width();
LOG(INFO) << "height: " << this->height();
LOG(INFO) << "depth: " << this->depth();
LOG(INFO) << "planes: " << this->planes();
for (int64_t height = 0; height < this->height(); ++height) {
for (int64_t width = 0; width < this->width(); ++width) {
for (int64_t plane = 0; plane < planes(); ++plane) {
for (int64_t depth = 0; depth < this->depth(); ++depth) {
float this_val = plane * this->depth() + depth;
(*this)(plane, depth, height, width) = this_val;
}
}
}
}
}
};
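// Brief usage sketch (dimensions are illustrative): FillWithYX broadcasts a
// (height, width) raster across every (plane, depth) slice.
//   xla::Array4D<float> arr(2, 3, 4, 5);
//   xla::Array2D<float> raster(4, 5);
//   arr.FillWithYX(raster);
//   float v = arr(0, 1, 2, 3);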
}
#endif | #include "xla/array4d.h"
#include <initializer_list>
#include <numeric>
#include <vector>
#include "absl/log/log.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/array2d.h"
#include "xla/test.h"
namespace xla {
namespace {
template <typename T>
int64_t Array4DLinearIndex(const Array4D<T>& arr,
absl::Span<const int64_t> idx) {
EXPECT_EQ(4, idx.size());
return (idx[3] + idx[2] * arr.n4() + idx[1] * arr.n3() * arr.n4() +
idx[0] * arr.n2() * arr.n3() * arr.n4());
}
TEST(Array4dTest, UninitializedDimsCtor) {
Array4D<int> empty(2, 3, 4, 5);
EXPECT_EQ(empty.n1(), 2);
EXPECT_EQ(empty.n2(), 3);
EXPECT_EQ(empty.n3(), 4);
EXPECT_EQ(empty.n4(), 5);
EXPECT_EQ(empty.num_elements(), 120);
}
TEST(Array4dTest, FillCtor) {
Array4D<int> fullof7(2, 3, 4, 5, 7);
EXPECT_EQ(fullof7.n1(), 2);
EXPECT_EQ(fullof7.n2(), 3);
EXPECT_EQ(fullof7.n3(), 4);
EXPECT_EQ(fullof7.n4(), 5);
fullof7.Each(
[](absl::Span<const int64_t> idx, int* cell) { EXPECT_EQ(*cell, 7); });
}
TEST(Array4dTest, ContainerCtor) {
std::vector<int> filler(120);
std::iota(filler.begin(), filler.end(), 0);
Array4D<int> arr(2, 3, 4, 5, filler);
EXPECT_EQ(arr.n1(), 2);
EXPECT_EQ(arr.n2(), 3);
EXPECT_EQ(arr.n3(), 4);
EXPECT_EQ(arr.n4(), 5);
arr.Each([&arr](absl::Span<const int64_t> idx, int* cell) {
EXPECT_EQ(*cell, Array4DLinearIndex(arr, idx));
});
}
TEST(Array3dTest, InitializerListCtor) {
Array4D<int> arr = {{{{1}, {2}}, {{3}, {4}}, {{5}, {6}}, {{7}, {8}}},
{{{9}, {10}}, {{11}, {12}}, {{13}, {14}}, {{15}, {16}}},
{{{17}, {18}}, {{19}, {20}}, {{21}, {22}}, {{23}, {24}}}};
EXPECT_EQ(arr.n1(), 3);
EXPECT_EQ(arr.n2(), 4);
EXPECT_EQ(arr.n3(), 2);
EXPECT_EQ(arr.n4(), 1);
EXPECT_EQ(arr.num_elements(), 24);
EXPECT_EQ(arr(0, 0, 0, 0), 1);
EXPECT_EQ(arr(0, 0, 1, 0), 2);
EXPECT_EQ(arr(0, 1, 0, 0), 3);
EXPECT_EQ(arr(0, 3, 1, 0), 8);
EXPECT_EQ(arr(1, 0, 0, 0), 9);
EXPECT_EQ(arr(1, 1, 1, 0), 12);
EXPECT_EQ(arr(2, 0, 0, 0), 17);
EXPECT_EQ(arr(2, 1, 1, 0), 20);
EXPECT_EQ(arr(2, 2, 0, 0), 21);
EXPECT_EQ(arr(2, 3, 1, 0), 24);
}
TEST(Array3dTest, InitializerListCtorHalf) {
Array4D<Eigen::half> arr = {
{{{1.0f}, {2.0f}}, {{3.0f}, {4.0f}}, {{5.0f}, {6.0f}}, {{7.0f}, {8.0f}}},
{{{9.0f}, {10.0f}},
{{11.0f}, {12.0f}},
{{13.0f}, {14.0f}},
{{15.0f}, {16.0f}}},
{{{17.0f}, {18.0f}},
{{19.0f}, {20.0f}},
{{21.0f}, {22.0f}},
{{23.0f}, {24.0f}}}};
EXPECT_EQ(arr.n1(), 3);
EXPECT_EQ(arr.n2(), 4);
EXPECT_EQ(arr.n3(), 2);
EXPECT_EQ(arr.n4(), 1);
EXPECT_EQ(arr.num_elements(), 24);
EXPECT_EQ(arr(0, 0, 0, 0), static_cast<Eigen::half>(1));
EXPECT_EQ(arr(0, 0, 1, 0), static_cast<Eigen::half>(2));
EXPECT_EQ(arr(0, 1, 0, 0), static_cast<Eigen::half>(3));
EXPECT_EQ(arr(0, 3, 1, 0), static_cast<Eigen::half>(8));
EXPECT_EQ(arr(1, 0, 0, 0), static_cast<Eigen::half>(9));
EXPECT_EQ(arr(1, 1, 1, 0), static_cast<Eigen::half>(12));
EXPECT_EQ(arr(2, 0, 0, 0), static_cast<Eigen::half>(17));
EXPECT_EQ(arr(2, 1, 1, 0), static_cast<Eigen::half>(20));
EXPECT_EQ(arr(2, 2, 0, 0), static_cast<Eigen::half>(21));
EXPECT_EQ(arr(2, 3, 1, 0), static_cast<Eigen::half>(24));
}
TEST(Array4dTest, Fill) {
Array4D<int> fullof7(2, 3, 4, 5, 7);
fullof7.Each(
[](absl::Span<const int64_t> idx, int* cell) { EXPECT_EQ(*cell, 7); });
fullof7.Fill(11);
fullof7.Each(
[](absl::Span<const int64_t> idx, int* cell) { EXPECT_EQ(*cell, 11); });
}
TEST(Array4dTest, FillWithMultiples) {
Array4D<float> arr(2, 3, 4, 5);
arr.FillWithMultiples(2.0f);
arr.Each([&arr](absl::Span<const int64_t> idx, float* cell) {
EXPECT_EQ(*cell, 2.0f * Array4DLinearIndex(arr, idx));
});
}
TEST(Array4dTest, FillRasterDimensionDepthOne) {
Array4D<float> array(1, 1, 128, 128);
Array2D<float> raster(128, 128);
for (int row = 0; row < 128; ++row) {
for (int col = 0; col < 128; ++col) {
raster(row, col) = row * 1000.0 + col;
}
}
array.FillWithYX(raster);
VLOG(1) << array.ToString();
EXPECT_FLOAT_EQ(raster(0, 0), array(0, 0, 0, 0));
EXPECT_FLOAT_EQ(raster(0, 1), array(0, 0, 0, 1));
EXPECT_FLOAT_EQ(raster(1, 0), array(0, 0, 1, 0));
EXPECT_FLOAT_EQ(raster(1, 1), array(0, 0, 1, 1));
EXPECT_FLOAT_EQ(raster(2, 0), array(0, 0, 2, 0));
EXPECT_FLOAT_EQ(raster(127, 127), array(0, 0, 127, 127));
EXPECT_FLOAT_EQ(0, array(0, 0, 0, 0));
EXPECT_FLOAT_EQ(1, array(0, 0, 0, 1));
EXPECT_FLOAT_EQ(2, array(0, 0, 0, 2));
EXPECT_FLOAT_EQ(1001, array(0, 0, 1, 1));
EXPECT_FLOAT_EQ(2001, array(0, 0, 2, 1));
EXPECT_FLOAT_EQ(127000, array(0, 0, 127, 0));
EXPECT_FLOAT_EQ(127127, array(0, 0, 127, 127));
}
TEST(Array4dTest, FillWithPzTestDepthOne) {
Array2D<float> matrix(3, 2);
std::initializer_list<std::initializer_list<float>> values = {
{-3.f, -0.1f}, {0.f, -0.1f}, {3.f, 0.2f},
};
int rowno = 0;
for (auto row : values) {
int colno = 0;
for (float f : row) {
matrix(rowno, colno) = f;
colno++;
}
rowno++;
}
Array4D<float> actual(3, 2, 1, 1);
actual.FillWithPZ(matrix);
EXPECT_FLOAT_EQ(-3, actual(0, 0, 0, 0));
EXPECT_FLOAT_EQ(-0.1, actual(0, 1, 0, 0));
EXPECT_FLOAT_EQ(0, actual(1, 0, 0, 0));
EXPECT_FLOAT_EQ(-0.1, actual(1, 1, 0, 0));
EXPECT_FLOAT_EQ(3, actual(2, 0, 0, 0));
EXPECT_FLOAT_EQ(0.2, actual(2, 1, 0, 0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/array4d.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/array4d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3d4c0666-2f4c-4f65-b796-b910e48f5143 | cpp | tensorflow/tensorflow | fp_util | third_party/xla/xla/fp_util.h | third_party/xla/xla/fp_util_test.cc | #ifndef XLA_FP_UTIL_H_
#define XLA_FP_UTIL_H_
#include <algorithm>
#define _USE_MATH_DEFINES
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <optional>
#include <utility>
#include "xla/types.h"
#include "xla/util.h"
namespace xla {
template <typename T>
constexpr bool IsZero(T x) {
return x == static_cast<T>(0.0f);
}
template <typename T>
constexpr bool IsSignMinus(T x) {
return x < 0;
}
template <typename T>
constexpr T Abs(T x) {
if (IsZero(x)) {
return x + static_cast<T>(0.0f);
}
return IsSignMinus(x) ? -x : x;
}
template <typename T>
constexpr bool IsNaN(T x) {
return x != x;
}
template <typename T>
constexpr bool IsInfinite(T x) {
return x == std::numeric_limits<T>::infinity() ||
x == -std::numeric_limits<T>::infinity();
}
template <typename T>
constexpr bool IsFinite(T x) {
return !IsNaN(x) && !IsInfinite(x);
}
template <typename T>
constexpr bool IsNormal(T x) {
T abs_x = Abs(x);
return abs_x >= std::numeric_limits<T>::min() &&
abs_x <= std::numeric_limits<T>::max();
}
template <typename T>
constexpr bool IsSubnormal(T x) {
T abs_x = Abs(x);
return abs_x > static_cast<T>(0) && abs_x < std::numeric_limits<T>::min();
}
template <typename T>
constexpr T ScaleBase(T x, int n) {
static_assert(is_specialized_floating_point_v<T>);
while (n > 0 && IsFinite(x) && !IsZero(x)) {
int multiplier_exponent =
std::min(n, std::numeric_limits<T>::max_exponent - 1);
x *= IPow(static_cast<T>(std::numeric_limits<T>::radix),
multiplier_exponent);
n -= multiplier_exponent;
}
for (; n < 0 && IsFinite(x) && !IsZero(x); ++n) {
T shifted_x = x / std::numeric_limits<T>::radix;
if (IsSubnormal(shifted_x)) {
int scale_exponent = -((std::numeric_limits<T>::min_exponent - 1) -
(std::numeric_limits<T>::digits - 1)) +
n;
if (scale_exponent < 0) {
return x * static_cast<T>(0);
}
return x *
ScaleBase(std::numeric_limits<T>::denorm_min(), scale_exponent);
}
x = shifted_x;
}
return x;
}
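// ScaleBase(x, n) evaluates x * radix^n (ldexp-style scaling) without
// overflowing intermediate products; for negative n it steps down through the
// subnormal range and returns a correctly signed zero once the result is no
// longer representable.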
template <typename T>
constexpr std::optional<int> LogBase(T x) {
if (IsNaN(x)) {
return std::nullopt;
}
if (IsInfinite(x)) {
return std::numeric_limits<int>::max();
}
if (IsZero(x)) {
return std::numeric_limits<int>::min();
}
T abs_x = Abs(x);
int exponent = 0;
while (abs_x < static_cast<T>(1)) {
abs_x *= std::numeric_limits<T>::radix;
exponent -= 1;
}
while (abs_x >= std::numeric_limits<T>::radix) {
abs_x /= std::numeric_limits<T>::radix;
exponent += 1;
}
return exponent;
}
enum class RoundingDirection {
kRoundTiesToEven,
kRoundTowardsZero,
};
template <typename DstT, typename SrcT>
constexpr std::pair<DstT, DstT> SplitToFpPair(
SrcT to_split, int num_high_trailing_zeros,
RoundingDirection rounding_direction =
RoundingDirection::kRoundTiesToEven) {
constexpr auto kError =
std::make_pair(std::numeric_limits<DstT>::quiet_NaN(),
std::numeric_limits<DstT>::quiet_NaN());
if (num_high_trailing_zeros < 0) {
return kError;
}
if (!IsFinite(to_split)) {
return kError;
}
if (IsZero(to_split)) {
DstT zero = static_cast<DstT>(to_split);
return std::make_pair(zero, zero);
}
if (IsSignMinus(to_split)) {
auto [high, low] =
SplitToFpPair<DstT, SrcT>(Abs(to_split), num_high_trailing_zeros);
return std::make_pair(-high, -low);
}
auto maybe_exponent = LogBase(to_split);
if (!maybe_exponent.has_value()) {
return kError;
}
int exponent = *maybe_exponent;
constexpr int kMinNormalExponent =
std::numeric_limits<DstT>::min_exponent - 1;
const int effective_precision = std::numeric_limits<DstT>::digits -
std::max(kMinNormalExponent - exponent, 0);
const int high_bits_to_keep = effective_precision - num_high_trailing_zeros;
if (high_bits_to_keep < 1) {
return kError;
}
static_assert(std::numeric_limits<SrcT>::max_exponent - 1 >=
std::numeric_limits<DstT>::digits);
SrcT scaled_significand =
ScaleBase(to_split, high_bits_to_keep - (exponent + 1));
uint64_t integer_part = static_cast<uint64_t>(scaled_significand);
SrcT fractional_part = scaled_significand - static_cast<SrcT>(integer_part);
switch (rounding_direction) {
case RoundingDirection::kRoundTiesToEven: {
if (fractional_part > static_cast<SrcT>(0.5f) ||
(fractional_part == static_cast<SrcT>(0.5f) &&
integer_part % 2 == 1)) {
integer_part += 1;
}
break;
}
case RoundingDirection::kRoundTowardsZero: {
break;
}
}
SrcT rounded = ScaleBase(static_cast<SrcT>(integer_part),
(exponent + 1) - high_bits_to_keep);
DstT high = static_cast<DstT>(rounded);
if (static_cast<SrcT>(high) != rounded) {
return kError;
}
DstT low = static_cast<DstT>(to_split - double{high});
return std::make_pair(high, low);
}
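// SplitToFpPair splits `to_split` into a (high, low) pair of DstT values:
// `high` keeps the leading bits with at least `num_high_trailing_zeros`
// trailing zero bits in its significand, and `low` carries the remainder so
// that high + low approximates the input; invalid inputs yield (NaN, NaN).
// The DropBits test below exercises the exact guarantees.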
template <typename DstT, typename SrcT>
constexpr DstT RoundToPrecision(
SrcT to_round, int precision = std::numeric_limits<DstT>::digits,
RoundingDirection rounding_direction =
RoundingDirection::kRoundTiesToEven) {
auto [high, low] = SplitToFpPair<DstT, SrcT>(
to_round,
std::numeric_limits<DstT>::digits - precision,
rounding_direction);
return high;
}
template <typename DstT>
constexpr std::pair<DstT, DstT> Log2FloatPair(int num_high_trailing_zeros) {
return SplitToFpPair<DstT>(M_LN2, num_high_trailing_zeros);
}
template <typename T>
constexpr T GoldbergUlp(T x) {
if (IsZero(x) || IsSubnormal(x)) {
return GoldbergUlp(std::numeric_limits<T>::min());
}
std::optional<int> maybe_exponent = LogBase(x);
if (maybe_exponent.has_value(); const int exponent = *maybe_exponent) {
return ScaleBase(std::numeric_limits<T>::epsilon(), exponent);
}
if constexpr (std::numeric_limits<T>::has_quiet_NaN) {
return std::numeric_limits<T>::quiet_NaN();
} else if constexpr (std::numeric_limits<T>::has_infinity) {
return std::numeric_limits<T>::infinity();
} else {
return GoldbergUlp(std::numeric_limits<T>::max());
}
}
template <typename T>
int64_t CalculateDistanceInFloats(T a, T b) {
auto a_sign_and_magnitude = SignAndMagnitude(a);
auto b_sign_and_magnitude = SignAndMagnitude(b);
uint64_t a_distance_from_zero = a_sign_and_magnitude.first
? -a_sign_and_magnitude.second
: a_sign_and_magnitude.second;
uint64_t b_distance_from_zero = b_sign_and_magnitude.first
? -b_sign_and_magnitude.second
: b_sign_and_magnitude.second;
int64_t signed_distance = a_distance_from_zero - b_distance_from_zero;
return std::abs(signed_distance);
}
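// CalculateDistanceInFloats(a, b) counts the representable values of T between
// a and b (an ULP-style distance): adjacent finite values are 1 apart, and
// e.g. -min and +min of double are 2 * 2^52 apart, as the distance tests
// below verify.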
}
#endif | #include "xla/fp_util.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include "absl/numeric/bits.h"
#include "xla/bit_cast.h"
#include "xla/test.h"
#include "xla/util.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
namespace {
class FixedValueTest : public testing::TestWithParam<double> {};
TEST_P(FixedValueTest, DropBits) {
double input = GetParam();
int exponent = std::ilogb(input);
constexpr int kMinNormalExponent =
std::numeric_limits<float>::min_exponent - 1;
int normalization_loss =
std::isnormal(input) ? std::max(kMinNormalExponent - exponent, 0) : 0;
int max_precision = std::numeric_limits<float>::digits - normalization_loss;
for (int i = 0; i < max_precision; ++i) {
auto result = SplitToFpPair<float>(input,
i);
auto [high_float, low_float] = result;
if (!std::isfinite(input)) {
EXPECT_TRUE(std::isnan(high_float));
EXPECT_TRUE(std::isnan(low_float));
continue;
}
EXPECT_FALSE(std::isnan(high_float));
EXPECT_FALSE(std::isnan(low_float));
EXPECT_GE(absl::countr_zero(absl::bit_cast<uint32_t>(high_float)), i);
double sum = double{high_float} + double{low_float};
if (input == 0.0) {
EXPECT_EQ(high_float, 0.0f);
EXPECT_EQ(low_float, 0.0f);
} else {
EXPECT_LT(std::fabs(input - double{high_float}),
std::scalbn(input, -(max_precision - i)));
if (std::abs(input) >= std::numeric_limits<float>::min()) {
EXPECT_LT(std::fabs(input - sum),
std::scalbn(std::fabs(input), -(2 * max_precision + 1 - i)));
}
}
if (i == 0) {
EXPECT_EQ(high_float + low_float, high_float);
}
if (input == high_float) {
EXPECT_EQ(low_float, 0.0f);
} else {
EXPECT_GT(std::fabs(high_float),
std::scalbn(low_float, max_precision - i))
<< "input: " << RoundTripFpToString(input)
<< " high_float: " << RoundTripFpToString(high_float)
<< " low_float: " << RoundTripFpToString(low_float);
auto no_op_split = SplitToFpPair<float>(high_float,
i);
EXPECT_EQ(no_op_split.first, high_float);
EXPECT_EQ(no_op_split.second, 0.0f);
}
if (input != sum) {
EXPECT_LT(absl::countr_zero(absl::bit_cast<uint64_t>(input)),
std::numeric_limits<double>::digits - (2 * max_precision + 1))
<< "input: " << RoundTripFpToString(input)
<< " high_float: " << RoundTripFpToString(high_float)
<< " low_float: " << RoundTripFpToString(low_float);
}
}
}
INSTANTIATE_TEST_SUITE_P(
SinglePrecisionInputs, FixedValueTest,
testing::Values(+0.0f, -0.0f, 1.0f, static_cast<float>(M_PI),
static_cast<float>(M_1_PI), static_cast<float>(M_E),
static_cast<float>(M_LN2), static_cast<float>(M_LOG2E),
static_cast<float>(M_SQRT2), static_cast<float>(M_SQRT1_2),
static_cast<float>(M_2_SQRTPI), 0x1.555554p+1f,
0x1.aaaaaap+1f, 0x1.fffffcp-127f,
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::quiet_NaN()));
INSTANTIATE_TEST_SUITE_P(DoublePrecisionInputs, FixedValueTest,
testing::Values(+0.0, -0.0, 1.0, M_PI, M_1_PI, M_E,
M_LN2, M_LOG2E, M_SQRT2, M_SQRT1_2,
M_2_SQRTPI, 0x1.5555555555555p+1,
0x1.aaaaaaaaaaaaap+1,
0x1.fffffffffffffp-127,
0x1.aaaaaaaaaaaaap-127));
template <typename T>
class FP8E4M3DistanceTest : public ::testing::Test {};
using F8E4M3Types = ::testing::Types<tsl::float8_e4m3, tsl::float8_e4m3fn>;
TYPED_TEST_SUITE(FP8E4M3DistanceTest, F8E4M3Types);
TEST(FPDistanceTest, F8E3M4Distance) {
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(tsl::float8_e3m4(8.0),
tsl::float8_e3m4(8.0)),
0);
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(tsl::float8_e3m4(8.0),
tsl::float8_e3m4(15.5)),
15);
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(tsl::float8_e3m4(8.0),
tsl::float8_e3m4(6)),
8);
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(
std::numeric_limits<tsl::float8_e3m4>::denorm_min(),
tsl::float8_e3m4(0)),
1);
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(
-std::numeric_limits<tsl::float8_e3m4>::denorm_min(),
tsl::float8_e3m4(0)),
1);
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(
-std::numeric_limits<tsl::float8_e3m4>::denorm_min(),
std::numeric_limits<tsl::float8_e3m4>::denorm_min()),
2);
EXPECT_EQ(
CalculateDistanceInFloats<tsl::float8_e3m4>(
std::numeric_limits<tsl::float8_e3m4>::min(), tsl::float8_e3m4(0)),
16);
EXPECT_EQ(
CalculateDistanceInFloats<tsl::float8_e3m4>(
-std::numeric_limits<tsl::float8_e3m4>::min(), tsl::float8_e3m4(0)),
16);
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(
-std::numeric_limits<tsl::float8_e3m4>::min(),
std::numeric_limits<tsl::float8_e3m4>::min()),
32);
}
TYPED_TEST(FP8E4M3DistanceTest, F8E4M3Distance) {
EXPECT_EQ(
CalculateDistanceInFloats<TypeParam>(TypeParam(8.0), TypeParam(8.0)), 0);
EXPECT_EQ(
CalculateDistanceInFloats<TypeParam>(TypeParam(8.0), TypeParam(15.0)), 7);
EXPECT_EQ(
CalculateDistanceInFloats<TypeParam>(TypeParam(8.0), TypeParam(6.0)), 4);
EXPECT_EQ(CalculateDistanceInFloats<TypeParam>(
std::numeric_limits<TypeParam>::denorm_min(), TypeParam(0)),
1);
EXPECT_EQ(CalculateDistanceInFloats<TypeParam>(
-std::numeric_limits<TypeParam>::denorm_min(), TypeParam(0)),
1);
EXPECT_EQ(CalculateDistanceInFloats<TypeParam>(
-std::numeric_limits<TypeParam>::denorm_min(),
std::numeric_limits<TypeParam>::denorm_min()),
2);
EXPECT_EQ(CalculateDistanceInFloats<TypeParam>(
std::numeric_limits<TypeParam>::min(), TypeParam(0)),
8);
EXPECT_EQ(CalculateDistanceInFloats<TypeParam>(
-std::numeric_limits<TypeParam>::min(), TypeParam(0)),
8);
EXPECT_EQ(CalculateDistanceInFloats<TypeParam>(
-std::numeric_limits<TypeParam>::min(),
std::numeric_limits<TypeParam>::min()),
16);
}
TEST(FPDistanceTest, F8E5M2Distance) {
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(tsl::float8_e5m2(8.0),
tsl::float8_e5m2(8.0)),
0);
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(tsl::float8_e5m2(8.0),
tsl::float8_e5m2(14)),
3);
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(tsl::float8_e5m2(8.0),
tsl::float8_e5m2(6)),
2);
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(
std::numeric_limits<tsl::float8_e5m2>::denorm_min(),
tsl::float8_e5m2(0)),
1);
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(
-std::numeric_limits<tsl::float8_e5m2>::denorm_min(),
tsl::float8_e5m2(0)),
1);
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(
-std::numeric_limits<tsl::float8_e5m2>::denorm_min(),
std::numeric_limits<tsl::float8_e5m2>::denorm_min()),
2);
EXPECT_EQ(
CalculateDistanceInFloats<tsl::float8_e5m2>(
std::numeric_limits<tsl::float8_e5m2>::min(), tsl::float8_e5m2(0)),
4);
EXPECT_EQ(
CalculateDistanceInFloats<tsl::float8_e5m2>(
-std::numeric_limits<tsl::float8_e5m2>::min(), tsl::float8_e5m2(0)),
4);
EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(
-std::numeric_limits<tsl::float8_e5m2>::min(),
std::numeric_limits<tsl::float8_e5m2>::min()),
8);
}
TEST(FPDistanceTest, F64Distance) {
EXPECT_EQ(CalculateDistanceInFloats<double>(8.0, 8.0), 0);
EXPECT_EQ(CalculateDistanceInFloats<double>(
std::numeric_limits<double>::denorm_min(),
std::nextafter(std::numeric_limits<double>::denorm_min(), 1.0)),
1);
EXPECT_EQ(CalculateDistanceInFloats<double>(
std::numeric_limits<double>::min(),
std::numeric_limits<double>::denorm_min()),
(1ULL << 52) - 1);
EXPECT_EQ(CalculateDistanceInFloats<double>(
std::numeric_limits<double>::denorm_min(), 0.0),
1);
EXPECT_EQ(CalculateDistanceInFloats<double>(
-std::numeric_limits<double>::denorm_min(), 0.0),
1);
EXPECT_EQ(CalculateDistanceInFloats<double>(
-std::numeric_limits<double>::denorm_min(),
std::numeric_limits<double>::denorm_min()),
2);
EXPECT_EQ(CalculateDistanceInFloats<double>(
std::numeric_limits<double>::min(), 0.0),
1ULL << 52);
EXPECT_EQ(CalculateDistanceInFloats<double>(
-std::numeric_limits<double>::min(), 0.0),
1ULL << 52);
EXPECT_EQ(
CalculateDistanceInFloats<double>(-std::numeric_limits<double>::min(),
std::numeric_limits<double>::min()),
2 * (1ULL << 52));
EXPECT_EQ(
CalculateDistanceInFloats<double>(BitCast<double>(0x7fffffffffffffff),
BitCast<double>(0xffffffffffffffff)),
2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/fp_util.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/fp_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6685bdc8-3ebc-494e-b6e0-36ecd722d50c | cpp | tensorflow/tensorflow | map_util | tensorflow/core/lib/gtl/map_util.h | third_party/xla/xla/tsl/lib/gtl/map_util_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_MAP_UTIL_H_
#define TENSORFLOW_CORE_LIB_GTL_MAP_UTIL_H_
#include "xla/tsl/lib/gtl/map_util.h"
namespace tensorflow {
namespace gtl {
using ::tsl::gtl::EraseKeyReturnValuePtr;
using ::tsl::gtl::FindOrNull;
using ::tsl::gtl::FindPtrOrNull;
using ::tsl::gtl::FindWithDefault;
using ::tsl::gtl::InsertIfNotPresent;
using ::tsl::gtl::InsertOrUpdate;
using ::tsl::gtl::LookupOrInsert;
using ::tsl::gtl::ReverseMap;
}
}
#endif | #include "xla/tsl/lib/gtl/map_util.h"
#include <map>
#include <set>
#include <string>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
TEST(MapUtil, Find) {
typedef std::map<string, string> Map;
Map m;
EXPECT_EQ("", gtl::FindWithDefault(m, "foo", ""));
m["foo"] = "bar";
EXPECT_EQ("bar", gtl::FindWithDefault(m, "foo", ""));
EXPECT_EQ("bar", *gtl::FindOrNull(m, "foo"));
EXPECT_TRUE(m.count("foo") > 0);
EXPECT_EQ(m["foo"], "bar");
}
TEST(MapUtil, LookupOrInsert) {
typedef std::map<string, string> Map;
Map m;
EXPECT_EQ("xyz", gtl::LookupOrInsert(&m, "foo", "xyz"));
EXPECT_EQ("xyz", gtl::LookupOrInsert(&m, "foo", "abc"));
}
TEST(MapUtil, InsertIfNotPresent) {
typedef std::set<int> Set;
Set s;
EXPECT_TRUE(gtl::InsertIfNotPresent(&s, 0));
EXPECT_EQ(s.count(0), 1);
EXPECT_FALSE(gtl::InsertIfNotPresent(&s, 0));
EXPECT_EQ(s.count(0), 1);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/map_util.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/gtl/map_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b97172ce-bd0b-4d5c-b376-b2377221647b | cpp | tensorflow/tensorflow | statusor | tensorflow/core/platform/statusor.h | third_party/xla/third_party/tsl/tsl/platform/statusor_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_STATUSOR_H_
#define TENSORFLOW_CORE_PLATFORM_STATUSOR_H_
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
using tsl::StatusOr;
}
#endif | #include "tsl/platform/statusor.h"
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/config.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace {
class Base1 {
public:
virtual ~Base1() {}
int pad_;
};
class Base2 {
public:
virtual ~Base2() {}
int yetotherpad_;
};
class Derived : public Base1, public Base2 {
public:
~Derived() override {}
int evenmorepad_;
};
class CopyNoAssign {
public:
explicit CopyNoAssign(int value) : foo_(value) {}
CopyNoAssign(const CopyNoAssign& other) : foo_(other.foo_) {}
int foo_;
private:
const CopyNoAssign& operator=(const CopyNoAssign&);
};
class NoDefaultConstructor {
public:
explicit NoDefaultConstructor(int foo);
};
static_assert(!std::is_default_constructible<NoDefaultConstructor>(),
"Should not be default-constructible.");
absl::StatusOr<std::unique_ptr<int>> ReturnUniquePtr() {
return std::unique_ptr<int>(new int(0));
}
TEST(StatusOr, NullPointerStatusOr) {
absl::StatusOr<int*> null_status(nullptr);
EXPECT_TRUE(null_status.ok());
EXPECT_EQ(null_status.value(), nullptr);
}
TEST(StatusOr, TestNoDefaultConstructorInitialization) {
absl::StatusOr<NoDefaultConstructor> statusor(errors::Cancelled(""));
EXPECT_FALSE(statusor.ok());
EXPECT_EQ(statusor.status().code(), absl::StatusCode::kCancelled);
absl::StatusOr<NoDefaultConstructor> statusor2;
EXPECT_FALSE(statusor2.ok());
EXPECT_EQ(statusor2.status().code(), absl::StatusCode::kUnknown);
}
TEST(StatusOr, TestMoveOnlyInitialization) {
absl::StatusOr<std::unique_ptr<int>> thing(ReturnUniquePtr());
ASSERT_TRUE(thing.ok());
EXPECT_EQ(0, *thing.value());
int* previous = thing.value().get();
thing = ReturnUniquePtr();
EXPECT_TRUE(thing.ok());
EXPECT_EQ(0, *thing.value());
EXPECT_NE(previous, thing.value().get());
}
TEST(StatusOr, TestMoveOnlyStatusCtr) {
absl::StatusOr<std::unique_ptr<int>> thing(errors::Cancelled(""));
ASSERT_FALSE(thing.ok());
}
TEST(StatusOr, TestMoveOnlyValueExtraction) {
absl::StatusOr<std::unique_ptr<int>> thing(ReturnUniquePtr());
ASSERT_TRUE(thing.ok());
std::unique_ptr<int> ptr = std::move(thing).value();
EXPECT_EQ(0, *ptr);
thing = std::move(ptr);
ptr = std::move(thing.value());
EXPECT_EQ(0, *ptr);
}
TEST(StatusOr, TestMoveOnlyConversion) {
absl::StatusOr<std::unique_ptr<const int>> const_thing(ReturnUniquePtr());
EXPECT_TRUE(const_thing.ok());
EXPECT_EQ(0, *const_thing.value());
const int* const_previous = const_thing.value().get();
const_thing = ReturnUniquePtr();
EXPECT_TRUE(const_thing.ok());
EXPECT_EQ(0, *const_thing.value());
EXPECT_NE(const_previous, const_thing.value().get());
}
TEST(StatusOr, TestMoveOnlyVector) {
std::vector<absl::StatusOr<std::unique_ptr<int>>> vec;
vec.push_back(ReturnUniquePtr());
vec.resize(2);
auto another_vec = std::move(vec);
EXPECT_EQ(0, *another_vec[0].value());
EXPECT_EQ(absl::StatusCode::kUnknown, another_vec[1].status().code());
}
TEST(StatusOr, TestMoveWithValuesAndErrors) {
absl::StatusOr<std::string> status_or(std::string(1000, '0'));
absl::StatusOr<std::string> value1(std::string(1000, '1'));
absl::StatusOr<std::string> value2(std::string(1000, '2'));
absl::StatusOr<std::string> error1(
absl::Status(absl::StatusCode::kUnknown, "error1"));
absl::StatusOr<std::string> error2(
absl::Status(absl::StatusCode::kUnknown, "error2"));
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '0'), status_or.value());
status_or = std::move(value1);
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '1'), status_or.value());
status_or = std::move(error1);
ASSERT_FALSE(status_or.ok());
EXPECT_EQ("error1", status_or.status().message());
status_or = std::move(error2);
ASSERT_FALSE(status_or.ok());
EXPECT_EQ("error2", status_or.status().message());
status_or = std::move(value2);
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '2'), status_or.value());
}
TEST(StatusOr, TestCopyWithValuesAndErrors) {
absl::StatusOr<std::string> status_or(std::string(1000, '0'));
absl::StatusOr<std::string> value1(std::string(1000, '1'));
absl::StatusOr<std::string> value2(std::string(1000, '2'));
absl::StatusOr<std::string> error1(
absl::Status(absl::StatusCode::kUnknown, "error1"));
absl::StatusOr<std::string> error2(
absl::Status(absl::StatusCode::kUnknown, "error2"));
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '0'), status_or.value());
status_or = value1;
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '1'), status_or.value());
status_or = error1;
ASSERT_FALSE(status_or.ok());
EXPECT_EQ("error1", status_or.status().message());
status_or = error2;
ASSERT_FALSE(status_or.ok());
EXPECT_EQ("error2", status_or.status().message());
status_or = value2;
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '2'), status_or.value());
EXPECT_EQ(std::string(1000, '1'), value1.value());
EXPECT_EQ("error1", error1.status().message());
EXPECT_EQ("error2", error2.status().message());
EXPECT_EQ(std::string(1000, '2'), value2.value());
}
TEST(StatusOr, TestDefaultCtor) {
absl::StatusOr<int> thing;
EXPECT_FALSE(thing.ok());
EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown);
}
TEST(StatusOrDeathTest, TestDefaultCtorValue) {
absl::StatusOr<int> thing;
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing.value(), "");
#endif
const absl::StatusOr<int> thing2;
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing2.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing2.value(), "");
#endif
}
TEST(StatusOr, TestStatusCtor) {
absl::StatusOr<int> thing(absl::Status(absl::StatusCode::kCancelled, ""));
EXPECT_FALSE(thing.ok());
EXPECT_EQ(thing.status().code(), absl::StatusCode::kCancelled);
}
TEST(StatusOr, TestValueCtor) {
const int kI = 4;
const absl::StatusOr<int> thing(kI);
EXPECT_TRUE(thing.ok());
EXPECT_EQ(kI, thing.value());
}
TEST(StatusOr, TestCopyCtorStatusOk) {
const int kI = 4;
const absl::StatusOr<int> original(kI);
const absl::StatusOr<int> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_EQ(original.value(), copy.value());
}
TEST(StatusOr, TestCopyCtorStatusNotOk) {
absl::StatusOr<int> original(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<int> copy(original);
EXPECT_EQ(copy.status(), original.status());
}
TEST(StatusOr, TestCopyCtorNonAssignable) {
const int kI = 4;
CopyNoAssign value(kI);
absl::StatusOr<CopyNoAssign> original(value);
absl::StatusOr<CopyNoAssign> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_EQ(original.value().foo_, copy.value().foo_);
}
TEST(StatusOr, TestCopyCtorStatusOKConverting) {
const int kI = 4;
absl::StatusOr<int> original(kI);
absl::StatusOr<double> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_DOUBLE_EQ(original.value(), copy.value());
}
TEST(StatusOr, TestCopyCtorStatusNotOkConverting) {
absl::StatusOr<int> original(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<double> copy(original);
EXPECT_EQ(copy.status(), original.status());
}
TEST(StatusOr, TestAssignmentStatusOk) {
const int kI = 4;
absl::StatusOr<int> source(kI);
absl::StatusOr<int> target;
target = source;
EXPECT_EQ(target.status(), source.status());
EXPECT_EQ(source.value(), target.value());
}
TEST(StatusOr, TestAssignmentStatusNotOk) {
absl::StatusOr<int> source(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<int> target;
target = source;
EXPECT_EQ(target.status(), source.status());
}
TEST(StatusOr, TestStatus) {
absl::StatusOr<int> good(4);
EXPECT_TRUE(good.ok());
absl::StatusOr<int> bad(absl::Status(absl::StatusCode::kCancelled, ""));
EXPECT_FALSE(bad.ok());
EXPECT_EQ(bad.status(), absl::Status(absl::StatusCode::kCancelled, ""));
}
TEST(StatusOr, TestValue) {
const int kI = 4;
absl::StatusOr<int> thing(kI);
EXPECT_EQ(kI, thing.value());
}
TEST(StatusOr, TestValueConst) {
const int kI = 4;
const absl::StatusOr<int> thing(kI);
EXPECT_EQ(kI, thing.value());
}
TEST(StatusOrDeathTest, TestValueNotOk) {
absl::StatusOr<int> thing(
absl::Status(absl::StatusCode::kCancelled, "cancelled"));
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing.value(), "cancelled");
#endif
}
TEST(StatusOrDeathTest, TestValueNotOkConst) {
const absl::StatusOr<int> thing(absl::Status(absl::StatusCode::kUnknown, ""));
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing.value(), "");
#endif
}
TEST(StatusOr, TestPointerDefaultCtor) {
absl::StatusOr<int*> thing;
EXPECT_FALSE(thing.ok());
EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown);
}
TEST(StatusOrDeathTest, TestPointerDefaultCtorValue) {
absl::StatusOr<int*> thing;
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing.value(), "");
#endif
}
TEST(StatusOr, TestPointerStatusCtor) {
absl::StatusOr<int*> thing(absl::Status(absl::StatusCode::kCancelled, ""));
EXPECT_FALSE(thing.ok());
EXPECT_EQ(thing.status(), absl::Status(absl::StatusCode::kCancelled, ""));
}
TEST(StatusOr, TestPointerValueCtor) {
const int kI = 4;
absl::StatusOr<const int*> thing(&kI);
EXPECT_TRUE(thing.ok());
EXPECT_EQ(&kI, thing.value());
}
TEST(StatusOr, TestPointerCopyCtorStatusOk) {
const int kI = 0;
absl::StatusOr<const int*> original(&kI);
absl::StatusOr<const int*> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_EQ(original.value(), copy.value());
}
TEST(StatusOr, TestPointerCopyCtorStatusNotOk) {
absl::StatusOr<int*> original(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<int*> copy(original);
EXPECT_EQ(copy.status(), original.status());
}
TEST(StatusOr, TestPointerCopyCtorStatusOKConverting) {
Derived derived;
absl::StatusOr<Derived*> original(&derived);
absl::StatusOr<Base2*> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_EQ(static_cast<const Base2*>(original.value()), copy.value());
}
TEST(StatusOr, TestPointerCopyCtorStatusNotOkConverting) {
absl::StatusOr<Derived*> original(
absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<Base2*> copy(original);
EXPECT_EQ(copy.status(), original.status());
}
TEST(StatusOr, TestPointerAssignmentStatusOk) {
const int kI = 0;
absl::StatusOr<const int*> source(&kI);
absl::StatusOr<const int*> target;
target = source;
EXPECT_EQ(target.status(), source.status());
EXPECT_EQ(source.value(), target.value());
}
TEST(StatusOr, TestPointerAssignmentStatusNotOk) {
absl::StatusOr<int*> source(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<int*> target;
target = source;
EXPECT_EQ(target.status(), source.status());
}
TEST(StatusOr, TestPointerStatus) {
const int kI = 0;
absl::StatusOr<const int*> good(&kI);
EXPECT_TRUE(good.ok());
absl::StatusOr<const int*> bad(
absl::Status(absl::StatusCode::kCancelled, ""));
EXPECT_EQ(bad.status(), absl::Status(absl::StatusCode::kCancelled, ""));
}
TEST(StatusOr, TestPointerValue) {
const int kI = 0;
absl::StatusOr<const int*> thing(&kI);
EXPECT_EQ(&kI, thing.value());
}
TEST(StatusOr, TestPointerValueConst) {
const int kI = 0;
const absl::StatusOr<const int*> thing(&kI);
EXPECT_EQ(&kI, thing.value());
}
TEST(StatusOr, TestArrowOperator) {
absl::StatusOr<std::unique_ptr<int>> uptr = ReturnUniquePtr();
EXPECT_EQ(*uptr->get(), 0);
}
TEST(StatusOr, TestStarOperator) {
absl::StatusOr<std::unique_ptr<int>> uptr = ReturnUniquePtr();
EXPECT_EQ(**uptr, 0);
}
TEST(StatusOr, TestStarOperatorDeath) {
absl::StatusOr<Base1> error(
absl::Status(absl::StatusCode::kCancelled, "cancelled"));
EXPECT_DEATH(*error, "cancelled");
}
static absl::StatusOr<int> MakeStatus() { return 100; }
template <typename T>
class BenchmarkFactory {
public:
BenchmarkFactory() : value_(new T) {}
~BenchmarkFactory() { delete value_; }
T* TrivialFactory() TF_ATTRIBUTE_NOINLINE { return value_; }
absl::Status ArgumentFactory(T** result) TF_ATTRIBUTE_NOINLINE {
*result = value_;
return absl::OkStatus();
}
absl::Status ArgumentFactoryFail(T** result) TF_ATTRIBUTE_NOINLINE {
*result = nullptr;
return absl::Status(absl::StatusCode::kCancelled, "");
}
absl::Status ArgumentFactoryFailShortMsg(T** result) TF_ATTRIBUTE_NOINLINE {
*result = nullptr;
return absl::Status(absl::StatusCode::kInternal, "");
}
absl::Status ArgumentFactoryFailLongMsg(T** result) TF_ATTRIBUTE_NOINLINE {
*result = nullptr;
return absl::Status(absl::StatusCode::kInternal,
"a big string of message junk that will never be read");
}
  absl::StatusOr<T*> StatusOrFactory() TF_ATTRIBUTE_NOINLINE {
    return static_cast<T*>(value_);
  }
  absl::StatusOr<T*> StatusOrFactoryFail() TF_ATTRIBUTE_NOINLINE {
    return absl::Status(absl::StatusCode::kCancelled, "");
  }
  absl::StatusOr<T*> StatusOrFactoryFailShortMsg() TF_ATTRIBUTE_NOINLINE {
    return absl::Status(absl::StatusCode::kInternal, "");
  }
  absl::StatusOr<T*> StatusOrFactoryFailLongMsg() TF_ATTRIBUTE_NOINLINE {
    return absl::Status(absl::StatusCode::kInternal,
                        "a big string of message junk that will never be read");
  }
private:
T* volatile value_;
BenchmarkFactory(const BenchmarkFactory&) = delete;
void operator=(const BenchmarkFactory&) = delete;
};
class BenchmarkType {
public:
BenchmarkType() {}
virtual ~BenchmarkType() {}
virtual void DoWork() TF_ATTRIBUTE_NOINLINE {}
private:
BenchmarkType(const BenchmarkType&) = delete;
void operator=(const BenchmarkType&) = delete;
};
void BM_CalibrateWorkLoop(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
BenchmarkType* result = factory.TrivialFactory();
for (auto s : state) {
if (result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_CalibrateWorkLoop);
void BM_TrivialFactory(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = factory.TrivialFactory();
if (result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_TrivialFactory);
void BM_ArgumentFactory(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = nullptr;
absl::Status status = factory.ArgumentFactory(&result);
if (status.ok() && result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_ArgumentFactory);
void BM_StatusOrFactory(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
absl::StatusOr<BenchmarkType*> result = factory.StatusOrFactory();
if (result.ok()) {
result.value()->DoWork();
}
}
}
BENCHMARK(BM_StatusOrFactory);
void BM_ArgumentFactoryFail(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = nullptr;
absl::Status status = factory.ArgumentFactoryFail(&result);
if (status.ok() && result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_ArgumentFactoryFail);
void BM_StatusOrFactoryFail(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
absl::StatusOr<BenchmarkType*> result = factory.StatusOrFactoryFail();
if (result.ok()) {
result.value()->DoWork();
}
}
}
BENCHMARK(BM_StatusOrFactoryFail);
void BM_ArgumentFactoryFailShortMsg(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = nullptr;
absl::Status status = factory.ArgumentFactoryFailShortMsg(&result);
if (status.ok() && result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_ArgumentFactoryFailShortMsg);
void BM_StatusOrFactoryFailShortMsg(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
absl::StatusOr<BenchmarkType*> result =
factory.StatusOrFactoryFailShortMsg();
if (result.ok()) {
result.value()->DoWork();
}
}
}
BENCHMARK(BM_StatusOrFactoryFailShortMsg);
void BM_ArgumentFactoryFailLongMsg(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = nullptr;
absl::Status status = factory.ArgumentFactoryFailLongMsg(&result);
if (status.ok() && result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_ArgumentFactoryFailLongMsg);
void BM_StatusOrFactoryFailLongMsg(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
absl::StatusOr<BenchmarkType*> result =
factory.StatusOrFactoryFailLongMsg();
if (result.ok()) {
result.value()->DoWork();
}
}
}
BENCHMARK(BM_StatusOrFactoryFailLongMsg);
#if defined(PLATFORM_GOOGLE)
absl::StatusOr<int> GetError() {
return absl::InvalidArgumentError("An invalid argument error");
}
absl::StatusOr<int> PropagateError() {
TF_ASSIGN_OR_RETURN(int a, GetError());
return a;
}
absl::StatusOr<int> PropagateError2() {
TF_ASSIGN_OR_RETURN(int a, PropagateError());
return a;
}
TEST(Status, StackTracePropagation) {
absl::StatusOr<int> s = PropagateError2();
auto sources = s.status().GetSourceLocations();
ASSERT_EQ(sources.size(), 3);
for (int i = 0; i < 3; ++i) {
ASSERT_EQ(sources[i].file_name(),
"third_party/tensorflow/tsl/platform/statusor_test.cc");
}
}
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/statusor.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/statusor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b9e0a1bd-6fce-4357-8434-7921a81b9262 | cpp | tensorflow/tensorflow | refcounting_hash_map | third_party/xla/xla/refcounting_hash_map.h | third_party/xla/xla/refcounting_hash_map_test.cc | #ifndef XLA_REFCOUNTING_HASH_MAP_H_
#define XLA_REFCOUNTING_HASH_MAP_H_
#include <functional>
#include <memory>
#include "absl/base/thread_annotations.h"
#include "absl/container/node_hash_map.h"
#include "absl/functional/function_ref.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
namespace xla {
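// A thread-safe map from K to std::shared_ptr<V> whose entries live only as
// long as some caller still holds a reference: when the last shared_ptr to a
// value is dropped, the value is destroyed and its entry is erased from the
// map. Illustrative use (the type and key below are only examples):
//
//   RefcountingHashMap<int, Widget> widgets;
//   auto w = widgets.GetOrCreateIfAbsent(
//       42, [](const int& k) { return std::make_unique<Widget>(k); });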
template <typename K, typename V>
class RefcountingHashMap {
public:
RefcountingHashMap() = default;
RefcountingHashMap(const RefcountingHashMap&) = delete;
RefcountingHashMap(RefcountingHashMap&&) = delete;
RefcountingHashMap& operator=(const RefcountingHashMap&) = delete;
RefcountingHashMap& operator=(RefcountingHashMap&&) = delete;
std::shared_ptr<V> GetOrCreateIfAbsent(
const K& key,
absl::FunctionRef<std::unique_ptr<V>(const K&)> value_factory) {
absl::MutexLock lock(&mu_);
auto it = map_.find(key);
if (it != map_.end()) {
if (std::shared_ptr<V> value = it->second.lock()) {
return value;
}
}
it = map_.emplace(key, std::weak_ptr<V>()).first;
std::shared_ptr<V> value(value_factory(key).release(),
Deleter{it->first, *this});
it->second = value;
return value;
}
private:
struct Deleter {
const K& key;
RefcountingHashMap& parent;
void operator()(V* v) {
delete v;
absl::MutexLock lock(&parent.mu_);
auto it = parent.map_.find(key);
if (it != parent.map_.end() && it->second.expired()) {
parent.map_.erase(it);
}
}
};
absl::Mutex mu_;
absl::node_hash_map<K, std::weak_ptr<V>> map_ ABSL_GUARDED_BY(mu_);
};
}
#endif | #include "xla/refcounting_hash_map.h"
#include <functional>
#include <memory>
#include <utility>
#include "xla/test.h"
namespace xla {
namespace {
struct DeleteNotifier {
DeleteNotifier() = default;
DeleteNotifier(const DeleteNotifier&) = delete;
DeleteNotifier& operator=(const DeleteNotifier&) = delete;
DeleteNotifier(DeleteNotifier&& o) noexcept : fn(std::move(o.fn)) {
o.fn = nullptr;
}
DeleteNotifier& operator=(DeleteNotifier&& o) noexcept {
fn = o.fn;
o.fn = nullptr;
return *this;
}
~DeleteNotifier() {
if (fn) {
fn();
}
}
std::function<void()> fn;
};
TEST(RefcountingHashMapTest, PointerIdentity) {
RefcountingHashMap<int, int> m;
auto factory = [](const int) { return std::make_unique<int>(); };
std::shared_ptr<int> a = m.GetOrCreateIfAbsent(0, factory);
std::shared_ptr<int> b = m.GetOrCreateIfAbsent(0, factory);
std::shared_ptr<int> c = m.GetOrCreateIfAbsent(1, factory);
EXPECT_EQ(a.get(), b.get());
EXPECT_NE(a.get(), c.get());
}
TEST(RefcountingHashMapTest, DefaultInitialized) {
RefcountingHashMap<int, int> m;
auto factory = [](const int) { return std::make_unique<int>(); };
EXPECT_EQ(*m.GetOrCreateIfAbsent(42, factory), 0);
}
TEST(RefcountingHashMapTest, DeletesEagerly) {
RefcountingHashMap<int, DeleteNotifier> m;
bool deleted = false;
auto factory = [](const int) { return std::make_unique<DeleteNotifier>(); };
auto handle = m.GetOrCreateIfAbsent(0, factory);
handle->fn = [&] { deleted = true; };
EXPECT_FALSE(deleted);
handle = nullptr;
EXPECT_TRUE(deleted);
}
TEST(RefcountingHashMapTest, CustomFactory) {
RefcountingHashMap<int, int> m;
auto factory = [](const int x) { return std::make_unique<int>(x + 1); };
EXPECT_EQ(*m.GetOrCreateIfAbsent(0, factory), 1);
EXPECT_EQ(*m.GetOrCreateIfAbsent(100, factory), 101);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/refcounting_hash_map.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/refcounting_hash_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
11b2521d-ab0f-4b91-a3a6-80f134f4eb7d | cpp | tensorflow/tensorflow | iterator_util | third_party/xla/xla/iterator_util.h | third_party/xla/xla/iterator_util_test.cc | #ifndef XLA_ITERATOR_UTIL_H_
#define XLA_ITERATOR_UTIL_H_
#include <cstddef>
#include <iterator>
#include <utility>
#include "xla/tsl/lib/gtl/iterator_range.h"
namespace xla {
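// UnwrappingIterator wraps an iterator over smart pointers (or anything with
// a get() method) and, on dereference, yields the raw pointer returned by
// get(). It models an input iterator, so it works with range-based for loops
// and single-pass algorithms such as std::find.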
template <typename NestedIter>
class UnwrappingIterator {
public:
using iterator_category = std::input_iterator_tag;
using value_type = decltype(std::declval<NestedIter>()->get());
using difference_type = ptrdiff_t;
using pointer = value_type*;
using reference = value_type&;
explicit UnwrappingIterator(NestedIter iter) : iter_(std::move(iter)) {}
auto operator*() -> value_type { return iter_->get(); }
UnwrappingIterator& operator++() {
++iter_;
return *this;
}
UnwrappingIterator operator++(int) {
UnwrappingIterator temp(iter_);
operator++();
return temp;
}
friend bool operator==(const UnwrappingIterator& a,
const UnwrappingIterator& b) {
return a.iter_ == b.iter_;
}
friend bool operator!=(const UnwrappingIterator& a,
const UnwrappingIterator& b) {
return !(a == b);
}
private:
NestedIter iter_;
};
template <typename NestedIter>
UnwrappingIterator<NestedIter> MakeUnwrappingIterator(NestedIter iter) {
return UnwrappingIterator<NestedIter>(std::move(iter));
}
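// FilteringIterator skips over elements of [iter, end_iter) for which the
// predicate returns false; the constructor advances past any leading
// non-matching elements so that dereferencing is always valid.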
template <typename NestedIter, typename UnaryPredicate>
class FilteringIterator {
public:
using iterator_category = std::input_iterator_tag;
using value_type = decltype(*std::declval<NestedIter>());
using difference_type = ptrdiff_t;
using pointer = value_type*;
using reference = value_type&;
FilteringIterator(NestedIter iter, NestedIter end_iter, UnaryPredicate pred)
: iter_(std::move(iter)),
end_iter_(std::move(end_iter)),
pred_(std::move(pred)) {
if (iter_ != end_iter_ && !pred_(**this)) {
++*this;
}
}
auto operator*() -> value_type { return *iter_; }
FilteringIterator& operator++() {
do {
++iter_;
} while (iter_ != end_iter_ && !pred_(**this));
return *this;
}
FilteringIterator operator++(int) {
FilteringIterator temp(iter_, end_iter_, pred_);
operator++();
return temp;
}
friend bool operator==(const FilteringIterator& a,
const FilteringIterator& b) {
return a.iter_ == b.iter_;
}
friend bool operator!=(const FilteringIterator& a,
const FilteringIterator& b) {
return !(a == b);
}
private:
NestedIter iter_;
NestedIter end_iter_;
UnaryPredicate pred_;
};
template <typename NestedIter, typename UnaryPredicate>
using FilteringUnwrappingIterator =
FilteringIterator<UnwrappingIterator<NestedIter>, UnaryPredicate>;
template <typename NestedIter, typename UnaryPredicate>
FilteringUnwrappingIterator<NestedIter, UnaryPredicate>
MakeFilteringUnwrappingIterator(NestedIter iter, NestedIter end_iter,
UnaryPredicate pred) {
return FilteringUnwrappingIterator<NestedIter, UnaryPredicate>(
MakeUnwrappingIterator(iter), MakeUnwrappingIterator(end_iter),
std::move(pred));
}
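// Convenience helper that builds an iterator_range of filtering, unwrapping
// iterators over a container of smart pointers, suitable for range-based for
// loops.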
template <typename NestedIter, typename UnaryPredicate>
tsl::gtl::iterator_range<
FilteringUnwrappingIterator<NestedIter, UnaryPredicate>>
MakeFilteringUnwrappingIteratorRange(NestedIter begin_iter, NestedIter end_iter,
UnaryPredicate pred) {
return {MakeFilteringUnwrappingIterator(begin_iter, end_iter, pred),
MakeFilteringUnwrappingIterator(end_iter, end_iter, pred)};
}
}
#endif | #include "xla/iterator_util.h"
#include <algorithm>
#include <functional>
#include <list>
#include <memory>
#include <vector>
#include "xla/test.h"
namespace xla {
namespace {
TEST(UnwrappingIteratorTest, Simple) {
std::vector<std::unique_ptr<int>> v;
for (int i = 0; i < 3; ++i) {
v.push_back(std::make_unique<int>(i));
}
int i = 0;
for (auto iter = MakeUnwrappingIterator(v.begin());
iter != MakeUnwrappingIterator(v.end()); ++iter) {
EXPECT_EQ(*iter, v[i].get());
++i;
}
}
TEST(UnwrappingIteratorTest, PostincrementOperator) {
std::vector<std::shared_ptr<int>> v;
for (int i = 0; i < 3; ++i) {
v.push_back(std::make_shared<int>(i));
}
auto iter = MakeUnwrappingIterator(v.begin());
EXPECT_EQ(*(iter++), v[0].get());
EXPECT_EQ(*iter, v[1].get());
}
TEST(UnwrappingIteratorTest, StdFind) {
std::list<std::unique_ptr<int>> l;
for (int i = 0; i < 3; ++i) {
l.push_back(std::make_unique<int>(i));
}
EXPECT_EQ(l.begin()->get(),
*std::find(MakeUnwrappingIterator(l.begin()),
MakeUnwrappingIterator(l.end()), l.begin()->get()));
}
TEST(FilteringUnwrappingIteratorTest, SimpleOdd) {
std::vector<std::unique_ptr<int>> v;
for (int i = 0; i < 3; ++i) {
v.push_back(std::make_unique<int>(i));
}
int i = 0;
std::vector<int> expected = {1};
auto pred = [](const int* value) { return *value % 2 == 1; };
for (auto iter = MakeFilteringUnwrappingIterator(v.begin(), v.end(), pred);
iter != MakeFilteringUnwrappingIterator(v.end(), v.end(), pred);
++iter) {
EXPECT_EQ(**iter, expected[i]);
++i;
}
EXPECT_EQ(i, expected.size());
}
TEST(FilteringUnwrappingIteratorTest, SimpleEven) {
std::vector<std::unique_ptr<int>> v;
for (int i = 0; i < 3; ++i) {
v.push_back(std::make_unique<int>(i));
}
int i = 0;
std::vector<int> expected = {0, 2};
auto pred = [](const int* value) { return *value % 2 == 0; };
for (auto iter = MakeFilteringUnwrappingIterator(v.begin(), v.end(), pred);
iter != MakeFilteringUnwrappingIterator(v.end(), v.end(), pred);
++iter) {
EXPECT_EQ(**iter, expected[i]);
++i;
}
EXPECT_EQ(i, expected.size());
}
TEST(FilteringUnwrappingIteratorTest, SimpleNone) {
std::vector<std::unique_ptr<int>> v;
for (int i = 0; i < 3; ++i) {
v.push_back(std::make_unique<int>(i));
}
int i = 0;
auto pred = [](const int* value) { return false; };
for (auto iter = MakeFilteringUnwrappingIterator(v.begin(), v.end(), pred);
iter != MakeFilteringUnwrappingIterator(v.end(), v.end(), pred);
++iter) {
++i;
}
EXPECT_EQ(i, 0);
}
TEST(FilteringUnwrappingIteratorTest, PostincrementOperator) {
std::vector<std::shared_ptr<int>> v;
for (int i = 0; i < 3; ++i) {
v.push_back(std::make_shared<int>(i));
}
auto iter = MakeFilteringUnwrappingIterator(
v.begin(), v.end(), [](const int* value) { return *value % 2 == 0; });
EXPECT_EQ(*(iter++), v[0].get());
EXPECT_EQ(*iter, v[2].get());
}
TEST(FilteringUnwrappingIteratorTest, StdFind) {
std::list<std::unique_ptr<int>> l;
for (int i = 0; i < 3; ++i) {
l.push_back(std::make_unique<int>(i));
}
std::function<bool(const int*)> pred = [](const int* value) {
return *value % 2 == 0;
};
EXPECT_EQ(
l.begin()->get(),
*std::find(MakeFilteringUnwrappingIterator(l.begin(), l.end(), pred),
MakeFilteringUnwrappingIterator(l.end(), l.end(), pred),
l.begin()->get()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/iterator_util.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/iterator_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6bf951a5-3117-4d7c-af58-2d619f8af77e | cpp | tensorflow/tensorflow | bit_cast | third_party/xla/xla/bit_cast.h | third_party/xla/xla/bit_cast_test.cc | #ifndef XLA_BIT_CAST_H_
#define XLA_BIT_CAST_H_
#include <cstdint>
#include "absl/base/casts.h"
#include "Eigen/Core"
#include "xla/types.h"
#include "tsl/platform/bfloat16.h"
namespace xla {
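// BitCast<T>(u) reinterprets the bits of `u` as a value of type T of the same
// size. The generic version defers to absl::bit_cast; the specializations
// below handle bfloat16 and Eigen::half by going through
// Eigen::numext::bit_cast instead.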
template <typename T, typename U>
T BitCast(U src) {
static_assert(sizeof(T) == sizeof(U), "sizes don't match");
return absl::bit_cast<T>(src);
}
template <>
inline tsl::bfloat16 BitCast<tsl::bfloat16, uint16_t>(uint16_t src) {
return Eigen::numext::bit_cast<tsl::bfloat16>(src);
}
template <>
inline uint16_t BitCast<uint16_t, tsl::bfloat16>(tsl::bfloat16 src) {
return Eigen::numext::bit_cast<uint16_t>(src);
}
template <>
inline Eigen::half BitCast<Eigen::half, uint16_t>(uint16_t src) {
return Eigen::numext::bit_cast<Eigen::half>(src);
}
template <>
inline uint16_t BitCast<uint16_t, Eigen::half>(Eigen::half src) {
return Eigen::numext::bit_cast<uint16_t>(src);
}
}
#endif | #include "xla/bit_cast.h"
#include <cstdint>
#include "Eigen/Core"
#include "xla/test.h"
#include "tsl/platform/bfloat16.h"
namespace xla {
namespace {
using ::Eigen::half;
using ::tsl::bfloat16;
TEST(BitCastTest, BackAndForth) {
for (uint32_t n = 0; n < 0x10000; ++n) {
uint16_t initial_rep = n;
bfloat16 float_val = BitCast<bfloat16>(initial_rep);
uint16_t final_rep = BitCast<uint16_t>(float_val);
EXPECT_EQ(initial_rep, final_rep);
}
for (uint32_t n = 0; n < 0x10000; ++n) {
uint16_t initial_rep = n;
half float_val = BitCast<half>(initial_rep);
uint16_t final_rep = BitCast<uint16_t>(float_val);
EXPECT_EQ(initial_rep, final_rep);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/bit_cast.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/bit_cast_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
654f57be-adcb-489c-a11d-b400e8cefe97 | cpp | tensorflow/tensorflow | array3d | third_party/xla/xla/array3d.h | third_party/xla/xla/array3d_test.cc | #ifndef XLA_ARRAY3D_H_
#define XLA_ARRAY3D_H_
#include <algorithm>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <numeric>
#include <random>
#include <vector>
#include "xla/array.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
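// Simple rank-3 array class backed by xla::Array<T>, with dimension accessors
// n1(), n2() and n3() and construction from nested initializer lists.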
template <typename T>
class Array3D : public Array<T> {
public:
Array3D() : Array<T>(std::vector<int64_t>{0, 0, 0}) {}
Array3D(const int64_t n1, const int64_t n2, const int64_t n3)
: Array<T>(std::vector<int64_t>{n1, n2, n3}) {}
Array3D(const int64_t n1, const int64_t n2, const int64_t n3, const T value)
: Array<T>(std::vector<int64_t>{n1, n2, n3}, value) {}
Array3D(std::initializer_list<std::initializer_list<std::initializer_list<T>>>
values)
: Array<T>(values) {}
template <typename T2, array_impl::overload_for_float<T, T2> = true>
Array3D(
std::initializer_list<std::initializer_list<std::initializer_list<T2>>>
values)
: Array<T>(values) {}
int64_t n1() const { return this->dim(0); }
int64_t n2() const { return this->dim(1); }
int64_t n3() const { return this->dim(2); }
};
}
#endif | #include "xla/array3d.h"
#include <initializer_list>
#include "xla/test.h"
#include "xla/types.h"
namespace xla {
namespace {
TEST(Array3dTest, UninitializedDimsCtor) {
Array3D<int> uninit(2, 3, 4);
EXPECT_EQ(uninit.n1(), 2);
EXPECT_EQ(uninit.n2(), 3);
EXPECT_EQ(uninit.n3(), 4);
EXPECT_EQ(uninit.num_elements(), 24);
}
TEST(Array3dTest, FillCtor) {
Array3D<int> fullof7(2, 3, 4, 7);
EXPECT_EQ(fullof7.n1(), 2);
EXPECT_EQ(fullof7.n2(), 3);
EXPECT_EQ(fullof7.n3(), 4);
for (int64_t n1 = 0; n1 < fullof7.n1(); ++n1) {
for (int64_t n2 = 0; n2 < fullof7.n2(); ++n2) {
for (int64_t n3 = 0; n3 < fullof7.n3(); ++n3) {
EXPECT_EQ(fullof7(n1, n2, n3), 7);
}
}
}
}
TEST(Array3dTest, InitializerListCtor) {
Array3D<int> arr = {{{1, 2}, {3, 4}, {5, 6}, {7, 8}},
{{9, 10}, {11, 12}, {13, 14}, {15, 16}},
{{17, 18}, {19, 20}, {21, 22}, {23, 24}}};
EXPECT_EQ(arr.n1(), 3);
EXPECT_EQ(arr.n2(), 4);
EXPECT_EQ(arr.n3(), 2);
EXPECT_EQ(arr.num_elements(), 24);
EXPECT_EQ(arr(0, 0, 0), 1);
EXPECT_EQ(arr(0, 0, 1), 2);
EXPECT_EQ(arr(0, 1, 0), 3);
EXPECT_EQ(arr(0, 3, 1), 8);
EXPECT_EQ(arr(1, 0, 0), 9);
EXPECT_EQ(arr(1, 1, 1), 12);
EXPECT_EQ(arr(2, 0, 0), 17);
EXPECT_EQ(arr(2, 1, 1), 20);
EXPECT_EQ(arr(2, 2, 0), 21);
EXPECT_EQ(arr(2, 3, 1), 24);
}
TEST(Array3dTest, InitializerListCtorHalf) {
Array3D<Eigen::half> arr = {
{{1.0f, 2.0f}, {3.0f, 4.0f}, {5.0f, 6.0f}, {7.0f, 8.0f}},
{{9.0f, 10.0f}, {11.0f, 12.0f}, {13.0f, 14.0f}, {15.0f, 16.0f}},
{{17.0f, 18.0f}, {19.0f, 20.0f}, {21.0f, 22.0f}, {23.0f, 24.0f}}};
EXPECT_EQ(arr.n1(), 3);
EXPECT_EQ(arr.n2(), 4);
EXPECT_EQ(arr.n3(), 2);
EXPECT_EQ(arr.num_elements(), 24);
EXPECT_EQ(arr(0, 0, 0), static_cast<Eigen::half>(1));
EXPECT_EQ(arr(0, 0, 1), static_cast<Eigen::half>(2));
EXPECT_EQ(arr(0, 1, 0), static_cast<Eigen::half>(3));
EXPECT_EQ(arr(0, 3, 1), static_cast<Eigen::half>(8));
EXPECT_EQ(arr(1, 0, 0), static_cast<Eigen::half>(9));
EXPECT_EQ(arr(1, 1, 1), static_cast<Eigen::half>(12));
EXPECT_EQ(arr(2, 0, 0), static_cast<Eigen::half>(17));
EXPECT_EQ(arr(2, 1, 1), static_cast<Eigen::half>(20));
EXPECT_EQ(arr(2, 2, 0), static_cast<Eigen::half>(21));
EXPECT_EQ(arr(2, 3, 1), static_cast<Eigen::half>(24));
}
TEST(Array3dTest, Fill) {
Array3D<int> fullof7(2, 3, 4, 7);
for (int64_t n1 = 0; n1 < fullof7.n1(); ++n1) {
for (int64_t n2 = 0; n2 < fullof7.n2(); ++n2) {
for (int64_t n3 = 0; n3 < fullof7.n3(); ++n3) {
EXPECT_EQ(fullof7(n1, n2, n3), 7);
}
}
}
fullof7.Fill(11);
for (int64_t n1 = 0; n1 < fullof7.n1(); ++n1) {
for (int64_t n2 = 0; n2 < fullof7.n2(); ++n2) {
for (int64_t n3 = 0; n3 < fullof7.n3(); ++n3) {
EXPECT_EQ(fullof7(n1, n2, n3), 11);
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/array3d.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/array3d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5187626f-d2e3-4804-8df7-fa079abdd8b9 | cpp | tensorflow/tensorflow | debug_options_parsers | third_party/xla/xla/debug_options_parsers.h | third_party/xla/xla/debug_options_parsers_test.cc | #ifndef XLA_DEBUG_OPTIONS_PARSERS_H_
#define XLA_DEBUG_OPTIONS_PARSERS_H_
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "xla/xla.pb.h"
namespace xla {
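// Parses a comma-separated list of "key=value" entries into the given map.
// An entry without '=' maps to the empty string, and only the first '=' in an
// entry splits key from value, so values may themselves contain '='.
// For example, "aa=bb,cc,dd=ee=ff" yields {aa: "bb", cc: "", dd: "ee=ff"}.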
template <typename T>
void parse_xla_backend_extra_options(T* extra_options_map,
std::string comma_separated_values) {
std::vector<std::string> extra_options_parts =
absl::StrSplit(comma_separated_values, ',');
for (const auto& part : extra_options_parts) {
size_t eq_pos = part.find_first_of('=');
if (eq_pos == std::string::npos) {
(*extra_options_map)[part] = "";
} else {
std::string value = "";
if (eq_pos + 1 < part.size()) {
value = part.substr(eq_pos + 1);
}
(*extra_options_map)[part.substr(0, eq_pos)] = value;
}
}
}
}
#endif | #include "xla/debug_options_parsers.h"
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "xla/debug_options_flags.h"
#include "xla/parse_flags_from_env.h"
#include "xla/tsl/util/command_line_flags.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
TEST(DebugOptionsFlags, ParseXlaBackendExtraOptions) {
absl::flat_hash_map<std::string, std::string> test_map;
std::string test_string = "aa=bb,cc,dd=,ee=ff=gg";
parse_xla_backend_extra_options(&test_map, test_string);
EXPECT_EQ(test_map.size(), 4);
EXPECT_EQ(test_map.at("aa"), "bb");
EXPECT_EQ(test_map.at("cc"), "");
EXPECT_EQ(test_map.at("dd"), "");
EXPECT_EQ(test_map.at("ee"), "ff=gg");
}
struct UppercaseStringSetterTestSpec {
std::string user_max_isa;
std::string expected_max_isa;
};
class UppercaseStringSetterTest
: public ::testing::Test,
public ::testing::WithParamInterface<UppercaseStringSetterTestSpec> {
public:
UppercaseStringSetterTest()
: flag_values_(DefaultDebugOptionsIgnoringFlags()) {
MakeDebugOptionsFlags(&flag_objects_, &flag_values_);
}
static std::string Name(
const ::testing::TestParamInfo<UppercaseStringSetterTestSpec>& info) {
return info.param.user_max_isa;
}
DebugOptions flag_values() const { return flag_values_; }
std::vector<tsl::Flag> flag_objects() { return flag_objects_; }
private:
DebugOptions flag_values_;
std::vector<tsl::Flag> flag_objects_;
};
TEST_P(UppercaseStringSetterTest, XlaCpuMaxIsa) {
UppercaseStringSetterTestSpec spec = GetParam();
tsl::setenv("XLA_FLAGS",
absl::StrCat("--xla_cpu_max_isa=", spec.user_max_isa).c_str(),
true);
int* pargc;
std::vector<char*>* pargv;
ResetFlagsFromEnvForTesting("XLA_FLAGS", &pargc, &pargv);
ParseFlagsFromEnvAndDieIfUnknown("XLA_FLAGS", flag_objects());
EXPECT_EQ(flag_values().xla_cpu_max_isa(), spec.expected_max_isa);
}
std::vector<UppercaseStringSetterTestSpec> GetUppercaseStringSetterTestCases() {
return std::vector<UppercaseStringSetterTestSpec>({
UppercaseStringSetterTestSpec{"sse4_2", "SSE4_2"},
UppercaseStringSetterTestSpec{"aVx512", "AVX512"},
UppercaseStringSetterTestSpec{"AMx_fP16", "AMX_FP16"},
});
}
INSTANTIATE_TEST_SUITE_P(
UppercaseStringSetterTestInstantiation, UppercaseStringSetterTest,
::testing::ValuesIn(GetUppercaseStringSetterTestCases()),
UppercaseStringSetterTest::Name);
}
}
int main(int argc, char* argv[]) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/debug_options_parsers.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/debug_options_parsers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
84f6e6ee-611f-4184-955c-815d484011a5 | cpp | tensorflow/tensorflow | array2d | third_party/xla/xla/array2d.h | third_party/xla/xla/array2d_test.cc | #ifndef XLA_ARRAY2D_H_
#define XLA_ARRAY2D_H_
#include <algorithm>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <random>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/strings/str_cat.h"
#include "xla/array.h"
#include "xla/types.h"
#include "xla/util.h"
namespace xla {
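// Simple rank-2 array class backed by xla::Array<T>. n1()/height() is the
// number of rows and n2()/width() the number of columns; FillUnique assigns a
// distinct value to every element and Each visits every element together with
// its indices.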
template <typename T>
class Array2D : public Array<T> {
public:
Array2D() : Array<T>(std::vector<int64_t>{0, 0}) {}
Array2D(const int64_t n1, const int64_t n2)
: Array<T>(std::vector<int64_t>{n1, n2}) {}
Array2D(const int64_t n1, const int64_t n2, const T value)
: Array<T>({n1, n2}, value) {}
Array2D(std::initializer_list<std::initializer_list<T>> values)
: Array<T>(values) {}
template <typename T2, array_impl::overload_for_float<T, T2> = true>
Array2D(std::initializer_list<std::initializer_list<T2>> values)
: Array<T>(values) {}
Array2D(const Array2D<T>& other) : Array<T>(other) {}
int64_t n1() const { return this->dim(0); }
int64_t n2() const { return this->dim(1); }
int64_t height() const { return this->dim(0); }
int64_t width() const { return this->dim(1); }
void FillUnique(T start_value = 0) {
int shift = Log2Ceiling<uint64_t>(n2());
for (int64_t i0 = 0; i0 < n1(); ++i0) {
for (int64_t i1 = 0; i1 < n2(); ++i1) {
(*this)(i0, i1) = ((i0 << shift) | i1) + start_value;
}
}
}
void Each(absl::FunctionRef<void(int64_t, int64_t, T*)> f) {
for (int64_t i0 = 0; i0 < n1(); ++i0) {
for (int64_t i1 = 0; i1 < n2(); ++i1) {
f(i0, i1, &(*this)(i0, i1));
}
}
}
};
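// Returns an n1 x n2 array whose elements, read in row-major order, are
// evenly spaced from `from` to `to`; the last element is set to exactly `to`.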
template <typename NativeT = float>
std::unique_ptr<Array2D<NativeT>> MakeLinspaceArray2D(double from, double to,
int64_t n1, int64_t n2) {
auto array = std::make_unique<Array2D<NativeT>>(n1, n2);
int64_t count = n1 * n2;
double step =
static_cast<double>((count > 1) ? (to - from) / (count - 1) : 0);
auto set = [&array, n2](int64_t index, NativeT value) {
(*array)(index / n2, index % n2) = value;
};
for (int64_t i = 0; i < count - 1; ++i) {
set(i, (static_cast<NativeT>(from + i * step)));
}
set(count - 1, static_cast<NativeT>(to));
return array;
}
}
#endif | #include "xla/array2d.h"
#include <initializer_list>
#include <string>
#include "Eigen/Core"
#include "xla/test.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
namespace {
TEST(Array2dTest, DefaultCtor) {
Array2D<int> empty;
EXPECT_EQ(empty.n1(), 0);
EXPECT_EQ(empty.n2(), 0);
EXPECT_EQ(empty.num_elements(), 0);
}
TEST(Array2dTest, UninitializedDimsCtor) {
Array2D<int> uninit(2, 3);
EXPECT_EQ(uninit.n1(), 2);
EXPECT_EQ(uninit.n2(), 3);
EXPECT_EQ(uninit.num_elements(), 6);
}
TEST(Array2dTest, FillCtor) {
Array2D<int> fullof7(2, 3, 7);
EXPECT_EQ(fullof7.n1(), 2);
EXPECT_EQ(fullof7.n2(), 3);
for (int64_t n1 = 0; n1 < fullof7.n1(); ++n1) {
for (int64_t n2 = 0; n2 < fullof7.n2(); ++n2) {
EXPECT_EQ(fullof7(n1, n2), 7);
}
}
}
TEST(Array2dTest, InitializerListCtor) {
Array2D<int> arr = {{1, 2, 3}, {4, 5, 6}};
EXPECT_EQ(arr.n1(), 2);
EXPECT_EQ(arr.n2(), 3);
EXPECT_EQ(arr(0, 0), 1);
EXPECT_EQ(arr(0, 1), 2);
EXPECT_EQ(arr(0, 2), 3);
EXPECT_EQ(arr(1, 0), 4);
EXPECT_EQ(arr(1, 1), 5);
EXPECT_EQ(arr(1, 2), 6);
}
TEST(Array2dTest, InitializerListCtorHalf) {
Array2D<Eigen::half> arr = {{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}};
EXPECT_EQ(arr.n1(), 2);
EXPECT_EQ(arr.n2(), 3);
EXPECT_EQ(arr(0, 0), static_cast<Eigen::half>(1));
EXPECT_EQ(arr(0, 1), static_cast<Eigen::half>(2));
EXPECT_EQ(arr(0, 2), static_cast<Eigen::half>(3));
EXPECT_EQ(arr(1, 0), static_cast<Eigen::half>(4));
EXPECT_EQ(arr(1, 1), static_cast<Eigen::half>(5));
EXPECT_EQ(arr(1, 2), static_cast<Eigen::half>(6));
}
TEST(Array2dTest, Accessors) {
Array2D<int> arr = {{1, 2, 3}, {4, 5, 6}};
EXPECT_EQ(arr.n1(), 2);
EXPECT_EQ(arr.n2(), 3);
EXPECT_EQ(arr.height(), 2);
EXPECT_EQ(arr.width(), 3);
EXPECT_EQ(arr.num_elements(), 6);
}
TEST(Array2dTest, IndexingReadWrite) {
Array2D<int> arr = {{1, 2, 3}, {4, 5, 6}};
EXPECT_EQ(arr(1, 1), 5);
EXPECT_EQ(arr(1, 2), 6);
arr(1, 1) = 51;
arr(1, 2) = 61;
EXPECT_EQ(arr(1, 1), 51);
EXPECT_EQ(arr(1, 2), 61);
}
TEST(Array2dTest, IndexingReadWriteBool) {
Array2D<bool> arr = {{false, true, false}, {true, true, false}};
EXPECT_EQ(arr(1, 1), true);
EXPECT_EQ(arr(1, 2), false);
arr(1, 1) = false;
arr(1, 2) = true;
EXPECT_EQ(arr(1, 1), false);
EXPECT_EQ(arr(1, 2), true);
}
TEST(Array2dTest, Fill) {
Array2D<int> fullof7(2, 3, 7);
for (int64_t n1 = 0; n1 < fullof7.n1(); ++n1) {
for (int64_t n2 = 0; n2 < fullof7.n2(); ++n2) {
EXPECT_EQ(fullof7(n1, n2), 7);
}
}
fullof7.Fill(11);
for (int64_t n1 = 0; n1 < fullof7.n1(); ++n1) {
for (int64_t n2 = 0; n2 < fullof7.n2(); ++n2) {
EXPECT_EQ(fullof7(n1, n2), 11);
}
}
}
TEST(Array2dTest, DataPointer) {
Array2D<int> arr = {{1, 2, 3}, {4, 5, 6}};
EXPECT_EQ(arr.data()[0], 1);
}
TEST(Array2dTest, Linspace) {
auto arr = MakeLinspaceArray2D(1.0, 3.5, 3, 2);
EXPECT_EQ(arr->n1(), 3);
EXPECT_EQ(arr->n2(), 2);
EXPECT_FLOAT_EQ((*arr)(0, 0), 1.0);
EXPECT_FLOAT_EQ((*arr)(0, 1), 1.5);
EXPECT_FLOAT_EQ((*arr)(1, 0), 2.0);
EXPECT_FLOAT_EQ((*arr)(1, 1), 2.5);
EXPECT_FLOAT_EQ((*arr)(2, 0), 3.0);
EXPECT_FLOAT_EQ((*arr)(2, 1), 3.5);
}
TEST(Array2dTest, LinspaceF8E5M2) {
auto arr = MakeLinspaceArray2D<tsl::float8_e5m2>(1.0, 3.5, 3, 2);
EXPECT_EQ(arr->n1(), 3);
EXPECT_EQ(arr->n2(), 2);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(0, 0)), 1.0);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(0, 1)), 1.5);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(1, 0)), 2.0);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(1, 1)), 2.5);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(2, 0)), 3.0);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(2, 1)), 3.5);
}
TEST(Array2dTest, LinspaceF8E4M3) {
auto arr = MakeLinspaceArray2D<tsl::float8_e4m3>(1.0, 3.5, 3, 2);
EXPECT_EQ(arr->n1(), 3);
EXPECT_EQ(arr->n2(), 2);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(0, 0)), 1.0);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(0, 1)), 1.5);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(1, 0)), 2.0);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(1, 1)), 2.5);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(2, 0)), 3.0);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(2, 1)), 3.5);
}
TEST(Array2dTest, LinspaceF8E4M3Fn) {
auto arr = MakeLinspaceArray2D<tsl::float8_e4m3fn>(1.0, 3.5, 3, 2);
EXPECT_EQ(arr->n1(), 3);
EXPECT_EQ(arr->n2(), 2);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(0, 0)), 1.0);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(0, 1)), 1.5);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(1, 0)), 2.0);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(1, 1)), 2.5);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(2, 0)), 3.0);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(2, 1)), 3.5);
}
TEST(Array2dTest, LinspaceF8E4M3FnNoNan) {
auto arr = MakeLinspaceArray2D<tsl::float8_e4m3fn>(0, 1, 23, 42);
for (int64_t n1 = 0; n1 < arr->n1(); ++n1) {
for (int64_t n2 = 0; n2 < arr->n2(); ++n2) {
EXPECT_EQ((*arr)(n1, n2), (*arr)(n1, n2));
}
}
}
TEST(Array2dTest, LinspaceF8E3M4) {
auto arr = MakeLinspaceArray2D<tsl::float8_e3m4>(1.0, 3.5, 3, 2);
EXPECT_EQ(arr->n1(), 3);
EXPECT_EQ(arr->n2(), 2);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(0, 0)), 1.0);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(0, 1)), 1.5);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(1, 0)), 2.0);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(1, 1)), 2.5);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(2, 0)), 3.0);
EXPECT_FLOAT_EQ(static_cast<float>((*arr)(2, 1)), 3.5);
}
TEST(Array2dTest, Stringification) {
auto arr = MakeLinspaceArray2D(1.0, 3.5, 3, 2);
const std::string expected = R"([[1, 1.5],
[2, 2.5],
[3, 3.5]])";
EXPECT_EQ(expected, arr->ToString());
}
TEST(Array2dTest, Equals) {
Array2D<int> arr0 = {{1, 2}, {3, 4}, {5, 6}};
Array2D<int> arr1 = {{1, 2}, {3, 4}, {5, 6}};
EXPECT_TRUE(arr0 == arr1);
EXPECT_FALSE(arr0 != arr1);
EXPECT_TRUE(arr1 == arr0);
EXPECT_FALSE(arr1 != arr0);
Array2D<int> arr2 = {{1, 2}, {3, 4}, {5, 6}, {7, 8}};
EXPECT_TRUE(arr0 != arr2);
EXPECT_FALSE(arr0 == arr2);
EXPECT_TRUE(arr2 != arr0);
EXPECT_FALSE(arr2 == arr0);
Array2D<int> arr3 = {{1, 2, 3}, {4, 5, 6}};
EXPECT_TRUE(arr0 != arr3);
EXPECT_FALSE(arr0 == arr3);
EXPECT_TRUE(arr3 != arr0);
EXPECT_FALSE(arr3 == arr0);
Array2D<int> arr4 = {{1, 2}, {3, 4}};
EXPECT_TRUE(arr0 != arr4);
EXPECT_FALSE(arr0 == arr4);
EXPECT_TRUE(arr4 != arr0);
EXPECT_FALSE(arr4 == arr0);
Array2D<int> arr5 = {{1, 2}, {13, 4}, {5, 6}};
EXPECT_TRUE(arr0 != arr5);
EXPECT_FALSE(arr0 == arr5);
EXPECT_TRUE(arr5 != arr0);
EXPECT_FALSE(arr5 == arr0);
Array2D<bool> bool_arr0 = {{false}, {true}};
Array2D<bool> bool_arr1 = {{false}, {true}};
EXPECT_TRUE(bool_arr0 == bool_arr1);
EXPECT_FALSE(bool_arr0 != bool_arr1);
Array2D<bool> bool_arr2 = {{false}, {false}};
EXPECT_FALSE(bool_arr0 == bool_arr2);
EXPECT_TRUE(bool_arr0 != bool_arr2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/array2d.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/array2d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2486c2e3-6592-4428-a86c-aeecaa457f0c | cpp | tensorflow/tensorflow | lru_cache | third_party/xla/xla/pjrt/lru_cache.h | third_party/xla/xla/pjrt/lru_cache_test.cc | #ifndef XLA_PJRT_LRU_CACHE_H_
#define XLA_PJRT_LRU_CACHE_H_
#include <functional>
#include <optional>
#include <unordered_map>
#include "absl/container/node_hash_map.h"
#include "tsl/platform/logging.h"
namespace xla {
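// A simple LRU cache. The LRU ordering and capacity live in a separate
// LRUList so that several caches can share one eviction budget: inserting
// into any cache may evict the least recently used entry of whichever cache
// owns it. Not thread-safe. Illustrative use (types and values are only
// examples):
//
//   LRUCache<std::string, int>::LRUList list(/*capacity=*/128);
//   LRUCache<std::string, int> cache(&list);
//   int v = cache.GetOrCreateIfAbsent(
//       "key", [](const std::string&) { return 42; });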
template <typename Key, typename Value,
typename Hash = typename absl::node_hash_map<Key, Value>::hasher,
typename Eq = typename absl::node_hash_map<Key, Value>::key_equal>
class LRUCache {
private:
struct LRUListEntry {
LRUListEntry* next;
LRUListEntry* prev;
};
public:
class LRUList {
public:
explicit LRUList(int capacity) : capacity_(capacity) {
head_.next = &head_;
head_.prev = &head_;
}
~LRUList() {
CHECK(head_.next == &head_);
CHECK(head_.prev == &head_);
}
LRUList(const LRUList&) = delete;
LRUList(LRUList&&) = delete;
LRUList& operator=(const LRUList&) = delete;
LRUList& operator=(LRUList&&) = delete;
int Capacity() const { return capacity_; }
int Size() const { return size_; }
void Clear();
private:
friend class LRUCache;
int capacity_;
int size_ = 0;
LRUListEntry head_;
};
explicit LRUCache(LRUList* lru_list) : lru_list_(lru_list) {}
~LRUCache();
LRUCache(const LRUCache&) = delete;
LRUCache(LRUCache&&) = delete;
LRUCache& operator=(const LRUCache&) = delete;
LRUCache& operator=(LRUCache&&) = delete;
Value GetOrCreateIfAbsent(const Key& key,
const std::function<Value(const Key&)>& factory);
void Remove(const Key& key);
void Clear();
int Size() const { return entries_.size(); }
int Capacity() const { return lru_list_->Capacity(); }
auto begin() const { return entries_.begin(); }
auto end() const { return entries_.end(); }
private:
LRUList* lru_list_;
struct Entry : public LRUListEntry {
Entry() = default;
const Key* key;
LRUCache* container;
std::optional<Value> value;
};
std::unordered_map<Key, Entry, Hash, Eq> entries_;
};
template <typename Key, typename Value, typename Hash, typename Eq>
void LRUCache<Key, Value, Hash, Eq>::LRUList::Clear() {
while (head_.next != &head_) {
static_cast<Entry*>(head_.next)->container->Clear();
}
size_ = 0;
}
template <typename Key, typename Value, typename Hash, typename Eq>
void LRUCache<Key, Value, Hash, Eq>::Clear() {
for (auto& e : entries_) {
LRUListEntry* l = &e.second;
l->next->prev = l->prev;
l->prev->next = l->next;
--lru_list_->size_;
}
entries_.clear();
}
template <typename Key, typename Value, typename Hash, typename Eq>
LRUCache<Key, Value, Hash, Eq>::~LRUCache() {
Clear();
}
template <typename Key, typename Value, typename Hash, typename Eq>
void LRUCache<Key, Value, Hash, Eq>::Remove(const Key& key) {
LRUListEntry* l = &entries_[key];
l->next->prev = l->prev;
l->prev->next = l->next;
--lru_list_->size_;
entries_.erase(key);
}
template <typename Key, typename Value, typename Hash, typename Eq>
Value LRUCache<Key, Value, Hash, Eq>::GetOrCreateIfAbsent(
const Key& key, const std::function<Value(const Key&)>& factory) {
auto [it, inserted] = entries_.try_emplace(key);
Entry& entry = it->second;
if (inserted) {
entry.key = &it->first;
entry.value = factory(*entry.key);
++lru_list_->size_;
} else {
entry.prev->next = entry.next;
entry.next->prev = entry.prev;
}
LRUListEntry& lru_head = lru_list_->head_;
entry.container = this;
entry.prev = lru_head.prev;
entry.next = &lru_head;
lru_head.prev->next = &entry;
lru_head.prev = &entry;
Value v = *entry.value;
if (lru_list_->size_ > lru_list_->capacity_) {
Entry* to_remove = static_cast<Entry*>(lru_head.next);
to_remove->next->prev = &lru_head;
lru_head.next = to_remove->next;
to_remove->container->entries_.extract(*to_remove->key);
--lru_list_->size_;
}
return v;
}
}
#endif | #include "xla/pjrt/lru_cache.h"
#include <random>
#include "xla/test.h"
namespace xla {
namespace {
TEST(LRUCache, Basics) {
LRUCache<int, int>::LRUList list(3);
LRUCache<int, int> cache(&list);
EXPECT_EQ(3, cache.Capacity());
EXPECT_EQ(0, cache.Size());
EXPECT_EQ(0, cache.GetOrCreateIfAbsent(0, [](int) { return 0; }));
EXPECT_EQ(1, cache.Size());
EXPECT_EQ(1, cache.GetOrCreateIfAbsent(1, [](int) { return 1; }));
EXPECT_EQ(2, cache.Size());
EXPECT_EQ(2, cache.GetOrCreateIfAbsent(2, [](int) { return 2; }));
EXPECT_EQ(3, cache.Size());
EXPECT_EQ(0, cache.GetOrCreateIfAbsent(0, [](int) { return 3; }));
EXPECT_EQ(3, cache.Size());
EXPECT_EQ(4, cache.GetOrCreateIfAbsent(3, [](int) { return 4; }));
EXPECT_EQ(3, cache.Size());
EXPECT_EQ(2, cache.GetOrCreateIfAbsent(2, [](int) { return 5; }));
EXPECT_EQ(3, cache.Size());
EXPECT_EQ(6, cache.GetOrCreateIfAbsent(1, [](int) { return 6; }));
EXPECT_EQ(3, cache.Size());
cache.Clear();
EXPECT_EQ(0, cache.Size());
EXPECT_EQ(6, cache.GetOrCreateIfAbsent(1, [](int) { return 6; }));
EXPECT_EQ(1, cache.Size());
}
TEST(LRUCache, SharedLRUList) {
LRUCache<int, int>::LRUList list(2);
LRUCache<int, int> cache1(&list);
LRUCache<int, int> cache2(&list);
EXPECT_EQ(2, list.Capacity());
EXPECT_EQ(0, cache1.Size());
EXPECT_EQ(0, cache2.Size());
EXPECT_EQ(0, cache1.GetOrCreateIfAbsent(0, [](int) { return 0; }));
EXPECT_EQ(1, list.Size());
EXPECT_EQ(1, cache1.Size());
EXPECT_EQ(0, cache2.Size());
EXPECT_EQ(1, cache2.GetOrCreateIfAbsent(1, [](int) { return 1; }));
EXPECT_EQ(2, list.Size());
EXPECT_EQ(1, cache1.Size());
EXPECT_EQ(1, cache2.Size());
EXPECT_EQ(2, cache1.GetOrCreateIfAbsent(2, [](int) { return 2; }));
EXPECT_EQ(2, list.Size());
EXPECT_EQ(1, cache1.Size());
EXPECT_EQ(1, cache2.Size());
EXPECT_EQ(1, cache2.GetOrCreateIfAbsent(1, [](int) { return -1; }));
EXPECT_EQ(2, list.Size());
EXPECT_EQ(1, cache1.Size());
EXPECT_EQ(1, cache2.Size());
cache1.Clear();
EXPECT_EQ(1, list.Size());
EXPECT_EQ(0, cache1.Size());
EXPECT_EQ(1, cache2.Size());
EXPECT_EQ(1, cache2.GetOrCreateIfAbsent(1, [](int) { return 4; }));
EXPECT_EQ(1, list.Size());
EXPECT_EQ(0, cache1.Size());
EXPECT_EQ(1, cache2.Size());
EXPECT_EQ(7, cache1.GetOrCreateIfAbsent(7, [](int) { return 7; }));
EXPECT_EQ(2, list.Size());
EXPECT_EQ(1, cache1.Size());
EXPECT_EQ(1, cache2.Size());
list.Clear();
EXPECT_EQ(0, list.Size());
EXPECT_EQ(0, cache1.Size());
EXPECT_EQ(0, cache2.Size());
EXPECT_EQ(2, cache1.GetOrCreateIfAbsent(2, [](int) { return 2; }));
}
TEST(LRUCache, RandomInsertions) {
LRUCache<int, int>::LRUList list(7);
LRUCache<int, int> cache(&list);
std::random_device rng;
std::uniform_int_distribution<int> dist(0, 100);
for (int i = 0; i < 1000; ++i) {
EXPECT_LE(cache.Size(), std::min(cache.Capacity(), i));
int key = dist(rng);
int k = -1;
int v = cache.GetOrCreateIfAbsent(key, [&](int k_arg) {
CHECK_EQ(k_arg, key);
k = k_arg;
return k_arg * 37;
});
EXPECT_TRUE(k == -1 || k == key);
EXPECT_EQ(v, key * 37);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/lru_cache.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/lru_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b576bd0b-f67d-4f76-a31b-799406585c21 | cpp | tensorflow/tensorflow | pjrt_c_api | third_party/xla/xla/pjrt/c/pjrt_c_api.h | third_party/xla/xla/pjrt/c/pjrt_c_api_test.cc | #ifndef XLA_PJRT_C_PJRT_C_API_H_
#define XLA_PJRT_C_PJRT_C_API_H_
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
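// PJRT_STRUCT_SIZE expands to the size of `struct_type` up to and including
// `last_field`. Every args struct below carries its size in `struct_size`, so
// callers and plugins built against different versions of this header can
// detect which trailing fields the other side knows about.
// PJRT_DEFINE_STRUCT_TRAITS additionally emits a <name>_STRUCT_SIZE constant.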
#define PJRT_STRUCT_SIZE(struct_type, last_field) \
offsetof(struct_type, last_field) + sizeof(((struct_type*)0)->last_field)
#define PJRT_DEFINE_STRUCT_TRAITS(sname, last_field) \
typedef struct sname sname; \
enum { sname##_STRUCT_SIZE = PJRT_STRUCT_SIZE(sname, last_field) }
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
PJRT_Extension_Type_Gpu_Custom_Call = 0,
PJRT_Extension_Type_Profiler,
PJRT_Extension_Type_Custom_Partitioner,
PJRT_Extension_Type_Stream,
PJRT_Extension_Type_Layouts,
PJRT_Extension_Type_FFI,
} PJRT_Extension_Type;
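// PJRT_Extension_Base is the common prefix of every extension struct; the
// `next` pointers form a linked list of extensions chained onto the
// `extension_start` field of an args struct.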
typedef struct PJRT_Extension_Base {
size_t struct_size;
PJRT_Extension_Type type;
struct PJRT_Extension_Base* next;
} PJRT_Extension_Base;
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Extension_Base, next);
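// The major version is bumped for incompatible changes to the C API and the
// minor version for backwards-compatible additions.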
#define PJRT_API_MAJOR 0
#define PJRT_API_MINOR 55
struct PJRT_Api_Version {
size_t struct_size;
PJRT_Extension_Base* extension_start;
int major_version;
int minor_version;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Api_Version, minor_version);
typedef struct PJRT_Error PJRT_Error;
struct PJRT_Error_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Error* error;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Error_Destroy_Args, error);
typedef void PJRT_Error_Destroy(PJRT_Error_Destroy_Args* args);
struct PJRT_Error_Message_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const PJRT_Error* error;
const char* message;
size_t message_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Error_Message_Args, message_size);
typedef void PJRT_Error_Message(PJRT_Error_Message_Args* args);
typedef enum {
PJRT_Error_Code_CANCELLED = 1,
PJRT_Error_Code_UNKNOWN = 2,
PJRT_Error_Code_INVALID_ARGUMENT = 3,
PJRT_Error_Code_DEADLINE_EXCEEDED = 4,
PJRT_Error_Code_NOT_FOUND = 5,
PJRT_Error_Code_ALREADY_EXISTS = 6,
PJRT_Error_Code_PERMISSION_DENIED = 7,
PJRT_Error_Code_RESOURCE_EXHAUSTED = 8,
PJRT_Error_Code_FAILED_PRECONDITION = 9,
PJRT_Error_Code_ABORTED = 10,
PJRT_Error_Code_OUT_OF_RANGE = 11,
PJRT_Error_Code_UNIMPLEMENTED = 12,
PJRT_Error_Code_INTERNAL = 13,
PJRT_Error_Code_UNAVAILABLE = 14,
PJRT_Error_Code_DATA_LOSS = 15,
PJRT_Error_Code_UNAUTHENTICATED = 16
} PJRT_Error_Code;
struct PJRT_Error_GetCode_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const PJRT_Error* error;
PJRT_Error_Code code;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Error_GetCode_Args, code);
typedef PJRT_Error* PJRT_Error_GetCode(PJRT_Error_GetCode_Args* args);
typedef PJRT_Error* (*PJRT_CallbackError)(PJRT_Error_Code code,
const char* message,
size_t message_size);
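// PJRT_NamedValue is a tagged union used to pass arbitrary name/value options
// across the C API boundary, e.g. client creation options and plugin or
// device attributes. `type` selects which union member is valid, and
// `value_size` gives the number of elements for string and int64-list values.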
typedef enum {
PJRT_NamedValue_kString = 0,
PJRT_NamedValue_kInt64,
PJRT_NamedValue_kInt64List,
PJRT_NamedValue_kFloat,
PJRT_NamedValue_kBool,
} PJRT_NamedValue_Type;
struct PJRT_NamedValue {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const char* name;
size_t name_size;
PJRT_NamedValue_Type type;
union {
const char* string_value;
int64_t int64_value;
const int64_t* int64_array_value;
float float_value;
bool bool_value;
};
size_t value_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_NamedValue, value_size);
struct PJRT_Plugin_Initialize_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Plugin_Initialize_Args, extension_start);
typedef PJRT_Error* PJRT_Plugin_Initialize(PJRT_Plugin_Initialize_Args* args);
struct PJRT_Plugin_Attributes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const PJRT_NamedValue* attributes;
size_t num_attributes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Plugin_Attributes_Args, attributes);
typedef PJRT_Error* PJRT_Plugin_Attributes(PJRT_Plugin_Attributes_Args* args);
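// PJRT_Event represents a notification that some asynchronous work has
// completed. It can be queried (IsReady), waited on synchronously (Await),
// inspected for an error once ready (Error), or observed with an OnReady
// callback; the event must eventually be destroyed with PJRT_Event_Destroy.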
typedef struct PJRT_Event PJRT_Event;
struct PJRT_Event_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Event* event;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_Destroy_Args, event);
typedef PJRT_Error* PJRT_Event_Destroy(PJRT_Event_Destroy_Args* args);
struct PJRT_Event_IsReady_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Event* event;
bool is_ready;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_IsReady_Args, is_ready);
typedef PJRT_Error* PJRT_Event_IsReady(PJRT_Event_IsReady_Args* args);
struct PJRT_Event_Error_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Event* event;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_Error_Args, event);
typedef PJRT_Error* PJRT_Event_Error(PJRT_Event_Error_Args* args);
struct PJRT_Event_Await_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Event* event;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_Await_Args, event);
typedef PJRT_Error* PJRT_Event_Await(PJRT_Event_Await_Args* args);
typedef void (*PJRT_Event_OnReadyCallback)(PJRT_Error* error, void* user_arg);
struct PJRT_Event_OnReady_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Event* event;
PJRT_Event_OnReadyCallback callback;
void* user_arg;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_OnReady_Args, user_arg);
typedef PJRT_Error* PJRT_Event_OnReady(PJRT_Event_OnReady_Args* args);
typedef struct PJRT_Client PJRT_Client;
typedef struct PJRT_Device PJRT_Device;
typedef struct PJRT_Memory PJRT_Memory;
typedef struct PJRT_DeviceDescription PJRT_DeviceDescription;
typedef struct PJRT_TopologyDescription PJRT_TopologyDescription;
typedef struct PJRT_Executable PJRT_Executable;
typedef struct PJRT_LoadedExecutable PJRT_LoadedExecutable;
typedef struct PJRT_Buffer PJRT_Buffer;
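// The key-value store callbacks below let the caller plug in a distributed
// key/value store, which PJRT uses during multi-process client creation to
// exchange per-process information. The get callback blocks for up to
// `timeout_in_ms` waiting for the key and returns the value through an
// out-parameter together with a deleter the caller must invoke.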
typedef void (*PJRT_KeyValueGetCallback_ValueDeleter)(char* value);
struct PJRT_KeyValueGetCallback_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const char* key;
size_t key_size;
int timeout_in_ms;
PJRT_CallbackError* callback_error;
void* user_arg;
char* value;
size_t value_size;
PJRT_KeyValueGetCallback_ValueDeleter value_deleter_callback;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_KeyValueGetCallback_Args,
value_deleter_callback);
typedef PJRT_Error* (*PJRT_KeyValueGetCallback)(
PJRT_KeyValueGetCallback_Args* args);
struct PJRT_KeyValuePutCallback_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const char* key;
size_t key_size;
const char* value;
size_t value_size;
PJRT_CallbackError* callback_error;
void* user_arg;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_KeyValuePutCallback_Args, user_arg);
typedef PJRT_Error* (*PJRT_KeyValuePutCallback)(
PJRT_KeyValuePutCallback_Args* args);
struct PJRT_Client_Create_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const PJRT_NamedValue* create_options;
size_t num_options;
PJRT_KeyValueGetCallback kv_get_callback;
void* kv_get_user_arg;
PJRT_KeyValuePutCallback kv_put_callback;
void* kv_put_user_arg;
PJRT_Client* client;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_Create_Args, client);
typedef PJRT_Error* PJRT_Client_Create(PJRT_Client_Create_Args* args);
struct PJRT_Client_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_Destroy_Args, client);
typedef PJRT_Error* PJRT_Client_Destroy(PJRT_Client_Destroy_Args* args);
struct PJRT_Client_PlatformName_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
const char* platform_name;
size_t platform_name_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_PlatformName_Args, platform_name_size);
typedef PJRT_Error* PJRT_Client_PlatformName(
PJRT_Client_PlatformName_Args* args);
struct PJRT_Client_ProcessIndex_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
int process_index;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_ProcessIndex_Args, process_index);
typedef PJRT_Error* PJRT_Client_ProcessIndex(
PJRT_Client_ProcessIndex_Args* args);
struct PJRT_Client_PlatformVersion_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
const char* platform_version;
size_t platform_version_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_PlatformVersion_Args,
platform_version_size);
typedef PJRT_Error* PJRT_Client_PlatformVersion(
PJRT_Client_PlatformVersion_Args* args);
struct PJRT_Client_TopologyDescription_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
PJRT_TopologyDescription* topology;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_TopologyDescription_Args, topology);
typedef PJRT_Error* PJRT_Client_TopologyDescription(
PJRT_Client_TopologyDescription_Args* args);
struct PJRT_Client_Devices_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
PJRT_Device* const* devices;
size_t num_devices;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_Devices_Args, num_devices);
typedef PJRT_Error* PJRT_Client_Devices(PJRT_Client_Devices_Args* args);
struct PJRT_Client_AddressableDevices_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
PJRT_Device* const* addressable_devices;
size_t num_addressable_devices;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_AddressableDevices_Args,
num_addressable_devices);
typedef PJRT_Error* PJRT_Client_AddressableDevices(
PJRT_Client_AddressableDevices_Args* args);
struct PJRT_Client_LookupDevice_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
int id;
PJRT_Device* device;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_LookupDevice_Args, device);
typedef PJRT_Error* PJRT_Client_LookupDevice(
PJRT_Client_LookupDevice_Args* args);
struct PJRT_Client_LookupAddressableDevice_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
int local_hardware_id;
PJRT_Device* addressable_device;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_LookupAddressableDevice_Args,
addressable_device);
typedef PJRT_Error* PJRT_Client_LookupAddressableDevice(
PJRT_Client_LookupAddressableDevice_Args* args);
struct PJRT_Client_AddressableMemories_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
PJRT_Memory* const* addressable_memories;
size_t num_addressable_memories;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_AddressableMemories_Args,
num_addressable_memories);
typedef PJRT_Error* PJRT_Client_AddressableMemories(
PJRT_Client_AddressableMemories_Args* args);
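// PJRT_Program carries the input program for compilation: `code` holds the
// serialized bytes and `format` names the serialization format (for example
// an MLIR module or an HLO proto, depending on what the plugin accepts).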
struct PJRT_Program {
size_t struct_size;
PJRT_Extension_Base* extension_start;
char* code;
size_t code_size;
const char* format;
size_t format_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Program, format_size);
struct PJRT_Client_Compile_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
const PJRT_Program* program;
const char* compile_options;
size_t compile_options_size;
PJRT_LoadedExecutable* executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_Compile_Args, executable);
typedef PJRT_Error* PJRT_Client_Compile(PJRT_Client_Compile_Args* args);
struct PJRT_Client_DefaultDeviceAssignment_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
int num_replicas;
int num_partitions;
size_t default_assignment_size;
int* default_assignment;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_DefaultDeviceAssignment_Args,
default_assignment);
typedef PJRT_Error* PJRT_Client_DefaultDeviceAssignment(
PJRT_Client_DefaultDeviceAssignment_Args* args);
typedef enum {
PJRT_Buffer_Type_INVALID,
PJRT_Buffer_Type_PRED,
PJRT_Buffer_Type_S8,
PJRT_Buffer_Type_S16,
PJRT_Buffer_Type_S32,
PJRT_Buffer_Type_S64,
PJRT_Buffer_Type_U8,
PJRT_Buffer_Type_U16,
PJRT_Buffer_Type_U32,
PJRT_Buffer_Type_U64,
PJRT_Buffer_Type_F16,
PJRT_Buffer_Type_F32,
PJRT_Buffer_Type_F64,
PJRT_Buffer_Type_BF16,
PJRT_Buffer_Type_C64,
PJRT_Buffer_Type_C128,
PJRT_Buffer_Type_F8E5M2,
PJRT_Buffer_Type_F8E4M3FN,
PJRT_Buffer_Type_F8E4M3B11FNUZ,
PJRT_Buffer_Type_F8E5M2FNUZ,
PJRT_Buffer_Type_F8E4M3FNUZ,
PJRT_Buffer_Type_S4,
PJRT_Buffer_Type_U4,
PJRT_Buffer_Type_TOKEN,
PJRT_Buffer_Type_S2,
PJRT_Buffer_Type_U2,
PJRT_Buffer_Type_F8E4M3,
PJRT_Buffer_Type_F8E3M4,
} PJRT_Buffer_Type;
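// PJRT_HostBufferSemantics tells the runtime how long the host data passed to
// PJRT_Client_BufferFromHostBuffer stays valid and whether it may be aliased:
// from "only during the call" through "until the transfer completes" to the
// zero-copy variants where the runtime keeps using the caller's buffer.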
typedef enum {
PJRT_HostBufferSemantics_kImmutableOnlyDuringCall,
PJRT_HostBufferSemantics_kImmutableUntilTransferCompletes,
PJRT_HostBufferSemantics_kImmutableZeroCopy,
PJRT_HostBufferSemantics_kMutableZeroCopy,
} PJRT_HostBufferSemantics;
typedef enum {
PJRT_Buffer_MemoryLayout_Type_Tiled = 0,
PJRT_Buffer_MemoryLayout_Type_Strides,
} PJRT_Buffer_MemoryLayout_Type;
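// A PJRT_Buffer_MemoryLayout is either a tiled layout (a minor-to-major
// dimension order plus optional tiles) or a dense layout described by byte
// strides; `type` selects which union member is valid.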
struct PJRT_Buffer_MemoryLayout_Tiled {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const int64_t* minor_to_major;
size_t minor_to_major_size;
const int64_t* tile_dims;
const size_t* tile_dim_sizes;
size_t num_tiles;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_MemoryLayout_Tiled, num_tiles);
struct PJRT_Buffer_MemoryLayout_Strides {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const int64_t* byte_strides;
size_t num_byte_strides;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_MemoryLayout_Strides, num_byte_strides);
struct PJRT_Buffer_MemoryLayout {
size_t struct_size;
PJRT_Extension_Base* extension_start;
union {
PJRT_Buffer_MemoryLayout_Tiled tiled;
PJRT_Buffer_MemoryLayout_Strides strides;
};
PJRT_Buffer_MemoryLayout_Type type;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_MemoryLayout, type);
struct PJRT_Client_BufferFromHostBuffer_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
const void* data;
PJRT_Buffer_Type type;
const int64_t* dims;
size_t num_dims;
const int64_t* byte_strides;
size_t num_byte_strides;
PJRT_HostBufferSemantics host_buffer_semantics;
PJRT_Device* device;
PJRT_Memory* memory;
PJRT_Buffer_MemoryLayout* device_layout;
PJRT_Event* done_with_host_buffer;
PJRT_Buffer* buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_BufferFromHostBuffer_Args, buffer);
typedef PJRT_Error* PJRT_Client_BufferFromHostBuffer(
PJRT_Client_BufferFromHostBuffer_Args* args);
struct PJRT_Client_CreateViewOfDeviceBuffer_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
void* device_buffer_ptr;
const int64_t* dims;
size_t num_dims;
PJRT_Buffer_Type element_type;
PJRT_Buffer_MemoryLayout* layout;
PJRT_Device* device;
void (*on_delete_callback)(void* device_buffer_ptr, void* user_arg);
void* on_delete_callback_arg;
intptr_t stream;
PJRT_Buffer* buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_CreateViewOfDeviceBuffer_Args, buffer);
typedef PJRT_Error* PJRT_Client_CreateViewOfDeviceBuffer(
PJRT_Client_CreateViewOfDeviceBuffer_Args* args);
struct PJRT_DeviceDescription_Id_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_DeviceDescription* device_description;
int id;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_Id_Args, id);
typedef PJRT_Error* PJRT_DeviceDescription_Id(
PJRT_DeviceDescription_Id_Args* args);
struct PJRT_DeviceDescription_ProcessIndex_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_DeviceDescription* device_description;
int process_index;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_ProcessIndex_Args,
process_index);
typedef PJRT_Error* PJRT_DeviceDescription_ProcessIndex(
PJRT_DeviceDescription_ProcessIndex_Args* args);
struct PJRT_DeviceDescription_Attributes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_DeviceDescription* device_description;
size_t num_attributes;
const PJRT_NamedValue* attributes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_Attributes_Args, attributes);
typedef PJRT_Error* PJRT_DeviceDescription_Attributes(
PJRT_DeviceDescription_Attributes_Args* args);
struct PJRT_DeviceDescription_Kind_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_DeviceDescription* device_description;
const char* device_kind;
size_t device_kind_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_Kind_Args, device_kind_size);
typedef PJRT_Error* PJRT_DeviceDescription_Kind(
PJRT_DeviceDescription_Kind_Args* args);
struct PJRT_DeviceDescription_DebugString_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_DeviceDescription* device_description;
const char* debug_string;
size_t debug_string_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_DebugString_Args,
debug_string_size);
typedef PJRT_Error* PJRT_DeviceDescription_DebugString(
PJRT_DeviceDescription_DebugString_Args* args);
struct PJRT_DeviceDescription_ToString_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_DeviceDescription* device_description;
const char* to_string;
size_t to_string_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_ToString_Args, to_string_size);
typedef PJRT_Error* PJRT_DeviceDescription_ToString(
PJRT_DeviceDescription_ToString_Args* args);
struct PJRT_Device_GetDescription_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Device* device;
PJRT_DeviceDescription* device_description;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Device_GetDescription_Args, device_description);
typedef PJRT_Error* PJRT_Device_GetDescription(
PJRT_Device_GetDescription_Args* args);
struct PJRT_Device_IsAddressable_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Device* device;
bool is_addressable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Device_IsAddressable_Args, is_addressable);
typedef PJRT_Error* PJRT_Device_IsAddressable(
PJRT_Device_IsAddressable_Args* args);
struct PJRT_Device_LocalHardwareId_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Device* device;
int local_hardware_id;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Device_LocalHardwareId_Args, local_hardware_id);
typedef PJRT_Error* PJRT_Device_LocalHardwareId(
PJRT_Device_LocalHardwareId_Args* args);
struct PJRT_Device_AddressableMemories_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Device* device;
PJRT_Memory* const* memories;
size_t num_memories;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Device_AddressableMemories_Args, num_memories);
typedef PJRT_Error* PJRT_Device_AddressableMemories(
PJRT_Device_AddressableMemories_Args* args);
struct PJRT_Device_DefaultMemory_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Device* device;
PJRT_Memory* memory;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Device_DefaultMemory_Args, memory);
typedef PJRT_Error* PJRT_Device_DefaultMemory(
PJRT_Device_DefaultMemory_Args* args);
struct PJRT_Device_MemoryStats_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Device* device;
int64_t bytes_in_use;
int64_t peak_bytes_in_use;
bool peak_bytes_in_use_is_set;
int64_t num_allocs;
bool num_allocs_is_set;
int64_t largest_alloc_size;
bool largest_alloc_size_is_set;
int64_t bytes_limit;
bool bytes_limit_is_set;
int64_t bytes_reserved;
bool bytes_reserved_is_set;
int64_t peak_bytes_reserved;
bool peak_bytes_reserved_is_set;
int64_t bytes_reservable_limit;
bool bytes_reservable_limit_is_set;
int64_t largest_free_block_bytes;
bool largest_free_block_bytes_is_set;
int64_t pool_bytes;
bool pool_bytes_is_set;
int64_t peak_pool_bytes;
bool peak_pool_bytes_is_set;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Device_MemoryStats_Args, peak_pool_bytes_is_set);
typedef PJRT_Error* PJRT_Device_MemoryStats(PJRT_Device_MemoryStats_Args* args);
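// In PJRT_Device_MemoryStats_Args, each `*_is_set` flag indicates whether the
// immediately preceding counter was populated by the plugin; `bytes_in_use`
// has no flag and is always reported.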
struct PJRT_Memory_Id_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Memory* memory;
int id;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Memory_Id_Args, id);
typedef PJRT_Error* PJRT_Memory_Id(PJRT_Memory_Id_Args* args);
struct PJRT_Memory_Kind_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Memory* memory;
const char* kind;
size_t kind_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Memory_Kind_Args, kind_size);
typedef PJRT_Error* PJRT_Memory_Kind(PJRT_Memory_Kind_Args* args);
struct PJRT_Memory_Kind_Id_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Memory* memory;
int kind_id;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Memory_Kind_Id_Args, kind_id);
typedef PJRT_Error* PJRT_Memory_Kind_Id(PJRT_Memory_Kind_Id_Args* args);
struct PJRT_Memory_DebugString_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Memory* memory;
const char* debug_string;
size_t debug_string_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Memory_DebugString_Args, debug_string_size);
typedef PJRT_Error* PJRT_Memory_DebugString(PJRT_Memory_DebugString_Args* args);
struct PJRT_Memory_ToString_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Memory* memory;
const char* to_string;
size_t to_string_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Memory_ToString_Args, to_string_size);
typedef PJRT_Error* PJRT_Memory_ToString(PJRT_Memory_ToString_Args* args);
struct PJRT_Memory_AddressableByDevices_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Memory* memory;
PJRT_Device* const* devices;
size_t num_devices;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Memory_AddressableByDevices_Args, num_devices);
typedef PJRT_Error* PJRT_Memory_AddressableByDevices(
PJRT_Memory_AddressableByDevices_Args* args);
typedef struct PJRT_ExecuteContext PJRT_ExecuteContext;
struct PJRT_ExecuteContext_Create_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_ExecuteContext* context;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_ExecuteContext_Create_Args, context);
typedef PJRT_Error* PJRT_ExecuteContext_Create(
PJRT_ExecuteContext_Create_Args* args);
struct PJRT_ExecuteContext_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_ExecuteContext* context;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_ExecuteContext_Destroy_Args, context);
typedef PJRT_Error* PJRT_ExecuteContext_Destroy(
PJRT_ExecuteContext_Destroy_Args* args);
struct PJRT_Executable_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_Destroy_Args, executable);
typedef PJRT_Error* PJRT_Executable_Destroy(PJRT_Executable_Destroy_Args* args);
struct PJRT_LoadedExecutable_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_Destroy_Args, executable);
typedef PJRT_Error* PJRT_LoadedExecutable_Destroy(
PJRT_LoadedExecutable_Destroy_Args* args);
struct PJRT_LoadedExecutable_GetExecutable_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* loaded_executable;
PJRT_Executable* executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_GetExecutable_Args, executable);
typedef PJRT_Error* PJRT_LoadedExecutable_GetExecutable(
PJRT_LoadedExecutable_GetExecutable_Args* args);
struct PJRT_Executable_Name_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
const char* executable_name;
size_t executable_name_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_Name_Args, executable_name_size);
typedef PJRT_Error* PJRT_Executable_Name(PJRT_Executable_Name_Args* args);
struct PJRT_Executable_NumReplicas_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
size_t num_replicas;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_NumReplicas_Args, num_replicas);
typedef PJRT_Error* PJRT_Executable_NumReplicas(
PJRT_Executable_NumReplicas_Args* args);
struct PJRT_Executable_NumPartitions_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
size_t num_partitions;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_NumPartitions_Args, num_partitions);
typedef PJRT_Error* PJRT_Executable_NumPartitions(
PJRT_Executable_NumPartitions_Args* args);
struct PJRT_LoadedExecutable_AddressableDevices_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* executable;
PJRT_Device* const* addressable_devices;
size_t num_addressable_devices;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_AddressableDevices_Args,
num_addressable_devices);
typedef PJRT_Error* PJRT_LoadedExecutable_AddressableDevices(
PJRT_LoadedExecutable_AddressableDevices_Args* args);
struct PJRT_Executable_OptimizedProgram_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
PJRT_Program* program;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_OptimizedProgram_Args, program);
typedef PJRT_Error* PJRT_Executable_OptimizedProgram(
PJRT_Executable_OptimizedProgram_Args* args);
struct PJRT_LoadedExecutable_Delete_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_Delete_Args, executable);
typedef PJRT_Error* PJRT_LoadedExecutable_Delete(
PJRT_LoadedExecutable_Delete_Args* args);
struct PJRT_LoadedExecutable_IsDeleted_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* executable;
bool is_deleted;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_IsDeleted_Args, is_deleted);
typedef PJRT_Error* PJRT_LoadedExecutable_IsDeleted(
PJRT_LoadedExecutable_IsDeleted_Args* args);
typedef struct PJRT_Chunk {
void* data;
size_t size;
void (*deleter)(void* data, void* deleter_arg);
void* deleter_arg;
} PJRT_Chunk;
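// A PJRT_Chunk carries ownership of `data`: as suggested by the deleter
// fields, the consumer of the chunk calls `deleter(data, deleter_arg)` once it
// is done with the payload.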
typedef struct PJRT_CopyToDeviceStream PJRT_CopyToDeviceStream;
struct PJRT_TransferMetadata;
typedef PJRT_Error* (*PJRT_SendCallback)(PJRT_Chunk* chunk,
PJRT_CallbackError* callback_error,
size_t total_size_in_bytes, bool done,
void* user_arg);
typedef void (*PJRT_RecvCallback)(PJRT_CopyToDeviceStream* stream,
void* user_arg);
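// PJRT_SendCallback is invoked with each outgoing chunk and may use
// `callback_error` to construct the PJRT_Error it returns; PJRT_RecvCallback
// hands the host a PJRT_CopyToDeviceStream into which incoming chunks are
// pushed.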
struct PJRT_SendCallbackInfo {
int64_t channel_id;
void* user_arg;
PJRT_SendCallback send_callback;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_SendCallbackInfo, send_callback);
struct PJRT_RecvCallbackInfo {
int64_t channel_id;
void* user_arg;
PJRT_RecvCallback recv_callback;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_RecvCallbackInfo, recv_callback);
struct PJRT_ExecuteOptions {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_SendCallbackInfo** send_callbacks;
PJRT_RecvCallbackInfo** recv_callbacks;
size_t num_send_ops;
size_t num_recv_ops;
int launch_id;
const int64_t* non_donatable_input_indices;
size_t num_non_donatable_input_indices;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_ExecuteOptions, num_non_donatable_input_indices);
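// In PJRT_ExecuteOptions, `send_callbacks` and `recv_callbacks` are, per the
// usual PJRT convention, 2D arrays shaped [num_devices][num_send_ops] and
// [num_devices][num_recv_ops]; `non_donatable_input_indices` lists argument
// positions that must not be donated to the execution.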
struct PJRT_LoadedExecutable_Execute_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* executable;
PJRT_ExecuteOptions* options;
PJRT_Buffer* const* const* argument_lists;
size_t num_devices;
size_t num_args;
PJRT_Buffer** const* output_lists;
PJRT_Event** device_complete_events;
PJRT_Device* execute_device;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_Execute_Args, execute_device);
typedef PJRT_Error* PJRT_LoadedExecutable_Execute(
PJRT_LoadedExecutable_Execute_Args* args);
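// For PJRT_LoadedExecutable_Execute_Args (inferred from the field layout and
// common PJRT usage): `argument_lists` is shaped [num_devices][num_args] and
// `output_lists` [num_devices][num_outputs], with the outer arrays allocated
// by the caller; `device_complete_events`, when non-null, receives one
// completion event per device; `execute_device` is only meaningful when
// running a portable, single-device executable.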
struct PJRT_Executable_NumOutputs_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
size_t num_outputs;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_NumOutputs_Args, num_outputs);
typedef PJRT_Error* PJRT_Executable_NumOutputs(
PJRT_Executable_NumOutputs_Args* args);
struct PJRT_Executable_SizeOfGeneratedCodeInBytes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
int64_t size_in_bytes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_SizeOfGeneratedCodeInBytes_Args,
size_in_bytes);
typedef PJRT_Error* PJRT_Executable_SizeOfGeneratedCodeInBytes(
PJRT_Executable_SizeOfGeneratedCodeInBytes_Args* args);
struct PJRT_Executable_Fingerprint_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
const char* executable_fingerprint;
size_t executable_fingerprint_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_Fingerprint_Args,
executable_fingerprint_size);
typedef PJRT_Error* PJRT_Executable_Fingerprint(
PJRT_Executable_Fingerprint_Args* args);
struct PJRT_Executable_GetCostAnalysis_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
size_t num_properties;
const PJRT_NamedValue* properties;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_GetCostAnalysis_Args, properties);
typedef PJRT_Error* PJRT_Executable_GetCostAnalysis(
PJRT_Executable_GetCostAnalysis_Args* args);
struct PJRT_Executable_GetCompiledMemoryStats_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
int64_t generated_code_size_in_bytes;
int64_t argument_size_in_bytes;
int64_t output_size_in_bytes;
int64_t alias_size_in_bytes;
int64_t temp_size_in_bytes;
int64_t host_generated_code_size_in_bytes;
int64_t host_argument_size_in_bytes;
int64_t host_output_size_in_bytes;
int64_t host_alias_size_in_bytes;
int64_t host_temp_size_in_bytes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_GetCompiledMemoryStats_Args,
host_temp_size_in_bytes);
typedef PJRT_Error* PJRT_Executable_GetCompiledMemoryStats(
PJRT_Executable_GetCompiledMemoryStats_Args* args);
struct PJRT_Executable_OutputElementTypes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
PJRT_Buffer_Type* output_types;
size_t num_output_types;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_OutputElementTypes_Args,
num_output_types);
typedef PJRT_Error* PJRT_Executable_OutputElementTypes(
PJRT_Executable_OutputElementTypes_Args* args);
struct PJRT_Executable_OutputDimensions_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
size_t num_outputs;
const int64_t* dims;
const size_t* dim_sizes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_OutputDimensions_Args, dim_sizes);
typedef PJRT_Error* PJRT_Executable_OutputDimensions(
PJRT_Executable_OutputDimensions_Args* args);
struct PJRT_Executable_OutputMemoryKinds_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
size_t num_outputs;
const char* const* memory_kinds;
const size_t* memory_kind_sizes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_OutputMemoryKinds_Args,
memory_kind_sizes);
typedef PJRT_Error* PJRT_Executable_OutputMemoryKinds(
PJRT_Executable_OutputMemoryKinds_Args* args);
typedef struct PJRT_SerializedExecutable PJRT_SerializedExecutable;
struct PJRT_Executable_Serialize_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const PJRT_Executable* executable;
const char* serialized_bytes;
size_t serialized_bytes_size;
PJRT_SerializedExecutable* serialized_executable;
void (*serialized_executable_deleter)(
PJRT_SerializedExecutable* exec);
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_Serialize_Args,
serialized_executable_deleter);
typedef PJRT_Error* PJRT_Executable_Serialize(
PJRT_Executable_Serialize_Args* args);
struct PJRT_Executable_DeserializeAndLoad_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
const char* serialized_executable;
size_t serialized_executable_size;
PJRT_LoadedExecutable* loaded_executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_DeserializeAndLoad_Args,
loaded_executable);
typedef PJRT_Error* PJRT_Executable_DeserializeAndLoad(
PJRT_Executable_DeserializeAndLoad_Args* args);
struct PJRT_LoadedExecutable_Fingerprint_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* executable;
const char* executable_fingerprint;
size_t executable_fingerprint_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_Fingerprint_Args,
executable_fingerprint_size);
typedef PJRT_Error* PJRT_LoadedExecutable_Fingerprint(
PJRT_LoadedExecutable_Fingerprint_Args* args);
struct PJRT_Buffer_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_Destroy_Args, buffer);
typedef PJRT_Error* PJRT_Buffer_Destroy(PJRT_Buffer_Destroy_Args* args);
struct PJRT_Buffer_ElementType_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Buffer_Type type;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_ElementType_Args, type);
typedef PJRT_Error* PJRT_Buffer_ElementType(PJRT_Buffer_ElementType_Args* args);
struct PJRT_Buffer_Dimensions_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
const int64_t* dims;
size_t num_dims;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_Dimensions_Args, num_dims);
typedef PJRT_Error* PJRT_Buffer_Dimensions(PJRT_Buffer_Dimensions_Args* args);
struct PJRT_Buffer_UnpaddedDimensions_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
const int64_t* unpadded_dims;
size_t num_dims;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_UnpaddedDimensions_Args, num_dims);
typedef PJRT_Error* PJRT_Buffer_UnpaddedDimensions(
PJRT_Buffer_UnpaddedDimensions_Args* args);
struct PJRT_Buffer_DynamicDimensionIndices_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
const size_t* dynamic_dim_indices;
size_t num_dynamic_dims;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_DynamicDimensionIndices_Args,
num_dynamic_dims);
typedef PJRT_Error* PJRT_Buffer_DynamicDimensionIndices(
PJRT_Buffer_DynamicDimensionIndices_Args* args);
struct PJRT_Buffer_GetMemoryLayout_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Buffer_MemoryLayout layout;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_GetMemoryLayout_Args, layout);
typedef PJRT_Error* PJRT_Buffer_GetMemoryLayout(
PJRT_Buffer_GetMemoryLayout_Args* args);
struct PJRT_Buffer_ToHostBuffer_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* src;
PJRT_Buffer_MemoryLayout* host_layout;
void* dst;
size_t dst_size;
PJRT_Event* event;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_ToHostBuffer_Args, event);
typedef PJRT_Error* PJRT_Buffer_ToHostBuffer(
PJRT_Buffer_ToHostBuffer_Args* args);
struct PJRT_Buffer_OnDeviceSizeInBytes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
size_t on_device_size_in_bytes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_OnDeviceSizeInBytes_Args,
on_device_size_in_bytes);
typedef PJRT_Error* PJRT_Buffer_OnDeviceSizeInBytes(
PJRT_Buffer_OnDeviceSizeInBytes_Args* args);
struct PJRT_Buffer_Delete_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_Delete_Args, buffer);
typedef PJRT_Error* PJRT_Buffer_Delete(PJRT_Buffer_Delete_Args* args);
struct PJRT_Buffer_IsDeleted_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
bool is_deleted;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_IsDeleted_Args, is_deleted);
typedef PJRT_Error* PJRT_Buffer_IsDeleted(PJRT_Buffer_IsDeleted_Args* args);
struct PJRT_Buffer_CopyToDevice_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Device* dst_device;
PJRT_Buffer* dst_buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_CopyToDevice_Args, dst_buffer);
typedef PJRT_Error* PJRT_Buffer_CopyToDevice(
PJRT_Buffer_CopyToDevice_Args* args);
struct PJRT_Buffer_CopyToMemory_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Memory* dst_memory;
PJRT_Buffer* dst_buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_CopyToMemory_Args, dst_buffer);
typedef PJRT_Error* PJRT_Buffer_CopyToMemory(
PJRT_Buffer_CopyToMemory_Args* args);
struct PJRT_Buffer_IsOnCpu_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
bool is_on_cpu;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_IsOnCpu_Args, is_on_cpu);
typedef PJRT_Error* PJRT_Buffer_IsOnCpu(PJRT_Buffer_IsOnCpu_Args* args);
struct PJRT_Buffer_Device_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Device* device;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_Device_Args, device);
typedef PJRT_Error* PJRT_Buffer_Device(PJRT_Buffer_Device_Args* args);
struct PJRT_Buffer_Memory_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Memory* memory;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_Memory_Args, memory);
typedef PJRT_Error* PJRT_Buffer_Memory(PJRT_Buffer_Memory_Args* args);
struct PJRT_Buffer_ReadyEvent_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Event* event;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_ReadyEvent_Args, event);
typedef PJRT_Error* PJRT_Buffer_ReadyEvent(PJRT_Buffer_ReadyEvent_Args* args);
struct PJRT_Buffer_UnsafePointer_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
uintptr_t buffer_pointer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_UnsafePointer_Args, buffer_pointer);
typedef PJRT_Error* PJRT_Buffer_UnsafePointer(
PJRT_Buffer_UnsafePointer_Args* args);
struct PJRT_Buffer_IncreaseExternalReferenceCount_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_IncreaseExternalReferenceCount_Args,
buffer);
typedef PJRT_Error* PJRT_Buffer_IncreaseExternalReferenceCount(
PJRT_Buffer_IncreaseExternalReferenceCount_Args* args);
struct PJRT_Buffer_DecreaseExternalReferenceCount_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_DecreaseExternalReferenceCount_Args,
buffer);
typedef PJRT_Error* PJRT_Buffer_DecreaseExternalReferenceCount(
PJRT_Buffer_DecreaseExternalReferenceCount_Args* args);
struct PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
void* device_memory_ptr;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args,
device_memory_ptr);
typedef PJRT_Error* PJRT_Buffer_OpaqueDeviceMemoryDataPointer(
PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args* args);
struct PJRT_CopyToDeviceStream_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_CopyToDeviceStream* stream;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_CopyToDeviceStream_Destroy_Args, stream);
typedef PJRT_Error* PJRT_CopyToDeviceStream_Destroy(
PJRT_CopyToDeviceStream_Destroy_Args* args);
struct PJRT_CopyToDeviceStream_AddChunk_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_CopyToDeviceStream* stream;
PJRT_Chunk* chunk;
PJRT_Event* transfer_complete;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_CopyToDeviceStream_AddChunk_Args,
transfer_complete);
typedef PJRT_Error* PJRT_CopyToDeviceStream_AddChunk(
PJRT_CopyToDeviceStream_AddChunk_Args* args);
struct PJRT_CopyToDeviceStream_TotalBytes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_CopyToDeviceStream* stream;
int64_t total_bytes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_CopyToDeviceStream_TotalBytes_Args, total_bytes);
typedef PJRT_Error* PJRT_CopyToDeviceStream_TotalBytes(
PJRT_CopyToDeviceStream_TotalBytes_Args* args);
struct PJRT_CopyToDeviceStream_GranuleSize_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_CopyToDeviceStream* stream;
int64_t granule_size_in_bytes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_CopyToDeviceStream_GranuleSize_Args,
granule_size_in_bytes);
typedef PJRT_Error* PJRT_CopyToDeviceStream_GranuleSize(
PJRT_CopyToDeviceStream_GranuleSize_Args* args);
struct PJRT_CopyToDeviceStream_CurrentBytes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_CopyToDeviceStream* stream;
int64_t current_bytes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_CopyToDeviceStream_CurrentBytes_Args,
current_bytes);
typedef PJRT_Error* PJRT_CopyToDeviceStream_CurrentBytes(
PJRT_CopyToDeviceStream_CurrentBytes_Args* args);
struct PJRT_TopologyDescription_Create_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const char* topology_name;
size_t topology_name_size;
const PJRT_NamedValue* create_options;
size_t num_options;
PJRT_TopologyDescription* topology;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_Create_Args, topology);
typedef PJRT_Error* PJRT_TopologyDescription_Create(
PJRT_TopologyDescription_Create_Args* args);
struct PJRT_TopologyDescription_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_TopologyDescription* topology;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_Destroy_Args, topology);
typedef PJRT_Error* PJRT_TopologyDescription_Destroy(
PJRT_TopologyDescription_Destroy_Args* args);
struct PJRT_TopologyDescription_PlatformVersion_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_TopologyDescription* topology;
const char* platform_version;
size_t platform_version_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_PlatformVersion_Args,
platform_version_size);
typedef PJRT_Error* PJRT_TopologyDescription_PlatformVersion(
PJRT_TopologyDescription_PlatformVersion_Args* args);
struct PJRT_TopologyDescription_PlatformName_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_TopologyDescription* topology;
const char* platform_name;
size_t platform_name_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_PlatformName_Args,
platform_name_size);
typedef PJRT_Error* PJRT_TopologyDescription_PlatformName(
PJRT_TopologyDescription_PlatformName_Args* args);
struct PJRT_TopologyDescription_GetDeviceDescriptions_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_TopologyDescription* topology;
PJRT_DeviceDescription* const* descriptions;
size_t num_descriptions;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_GetDeviceDescriptions_Args,
num_descriptions);
typedef PJRT_Error* PJRT_TopologyDescription_GetDeviceDescriptions(
PJRT_TopologyDescription_GetDeviceDescriptions_Args* args);
typedef struct PJRT_SerializedTopology PJRT_SerializedTopology;
struct PJRT_TopologyDescription_Serialize_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_TopologyDescription* topology;
const char* serialized_bytes;
size_t serialized_bytes_size;
PJRT_SerializedTopology* serialized_topology;
void (*serialized_topology_deleter)(
PJRT_SerializedTopology* serialized_topology);
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_Serialize_Args,
serialized_topology_deleter);
typedef PJRT_Error* PJRT_TopologyDescription_Serialize(
PJRT_TopologyDescription_Serialize_Args* args);
struct PJRT_TopologyDescription_Attributes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_TopologyDescription* topology;
const PJRT_NamedValue* attributes;
size_t num_attributes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_Attributes_Args,
num_attributes);
typedef PJRT_Error* PJRT_TopologyDescription_Attributes(
PJRT_TopologyDescription_Attributes_Args* args);
struct PJRT_Compile_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const PJRT_TopologyDescription* topology;
const PJRT_Program* program;
const char* compile_options;
size_t compile_options_size;
PJRT_Client* client;
PJRT_Executable* executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Compile_Args, executable);
typedef PJRT_Error* PJRT_Compile(PJRT_Compile_Args* args);
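// PJRT_Compile compiles `program` for the given `topology` using the
// serialized CompileOptionsProto in `compile_options`, returning the result
// via `executable`; `client` is optional in the usual PJRT ahead-of-time
// compilation flow (hedged: inferred from the argument set, not restated from
// original comments).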
#define _PJRT_API_STRUCT_FIELD(fn_type) fn_type* fn_type
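// _PJRT_API_STRUCT_FIELD(PJRT_Foo) expands to `PJRT_Foo* PJRT_Foo;`, i.e. a
// function-pointer member whose name matches its function typedef; this is how
// the PJRT_Api dispatch table below is populated.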
typedef struct PJRT_Api {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Api_Version pjrt_api_version;
_PJRT_API_STRUCT_FIELD(PJRT_Error_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_Error_Message);
_PJRT_API_STRUCT_FIELD(PJRT_Error_GetCode);
_PJRT_API_STRUCT_FIELD(PJRT_Plugin_Initialize);
_PJRT_API_STRUCT_FIELD(PJRT_Plugin_Attributes);
_PJRT_API_STRUCT_FIELD(PJRT_Event_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_Event_IsReady);
_PJRT_API_STRUCT_FIELD(PJRT_Event_Error);
_PJRT_API_STRUCT_FIELD(PJRT_Event_Await);
_PJRT_API_STRUCT_FIELD(PJRT_Event_OnReady);
_PJRT_API_STRUCT_FIELD(PJRT_Client_Create);
_PJRT_API_STRUCT_FIELD(PJRT_Client_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_Client_PlatformName);
_PJRT_API_STRUCT_FIELD(PJRT_Client_ProcessIndex);
_PJRT_API_STRUCT_FIELD(PJRT_Client_PlatformVersion);
_PJRT_API_STRUCT_FIELD(PJRT_Client_Devices);
_PJRT_API_STRUCT_FIELD(PJRT_Client_AddressableDevices);
_PJRT_API_STRUCT_FIELD(PJRT_Client_LookupDevice);
_PJRT_API_STRUCT_FIELD(PJRT_Client_LookupAddressableDevice);
_PJRT_API_STRUCT_FIELD(PJRT_Client_AddressableMemories);
_PJRT_API_STRUCT_FIELD(PJRT_Client_Compile);
_PJRT_API_STRUCT_FIELD(PJRT_Client_DefaultDeviceAssignment);
_PJRT_API_STRUCT_FIELD(PJRT_Client_BufferFromHostBuffer);
_PJRT_API_STRUCT_FIELD(PJRT_DeviceDescription_Id);
_PJRT_API_STRUCT_FIELD(PJRT_DeviceDescription_ProcessIndex);
_PJRT_API_STRUCT_FIELD(PJRT_DeviceDescription_Attributes);
_PJRT_API_STRUCT_FIELD(PJRT_DeviceDescription_Kind);
_PJRT_API_STRUCT_FIELD(PJRT_DeviceDescription_DebugString);
_PJRT_API_STRUCT_FIELD(PJRT_DeviceDescription_ToString);
_PJRT_API_STRUCT_FIELD(PJRT_Device_GetDescription);
_PJRT_API_STRUCT_FIELD(PJRT_Device_IsAddressable);
_PJRT_API_STRUCT_FIELD(PJRT_Device_LocalHardwareId);
_PJRT_API_STRUCT_FIELD(PJRT_Device_AddressableMemories);
_PJRT_API_STRUCT_FIELD(PJRT_Device_DefaultMemory);
_PJRT_API_STRUCT_FIELD(PJRT_Device_MemoryStats);
_PJRT_API_STRUCT_FIELD(PJRT_Memory_Id);
_PJRT_API_STRUCT_FIELD(PJRT_Memory_Kind);
_PJRT_API_STRUCT_FIELD(PJRT_Memory_DebugString);
_PJRT_API_STRUCT_FIELD(PJRT_Memory_ToString);
_PJRT_API_STRUCT_FIELD(PJRT_Memory_AddressableByDevices);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_Name);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_NumReplicas);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_NumPartitions);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_NumOutputs);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_SizeOfGeneratedCodeInBytes);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_GetCostAnalysis);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_OutputMemoryKinds);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_OptimizedProgram);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_Serialize);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_GetExecutable);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_AddressableDevices);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_Delete);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_IsDeleted);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_Execute);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_DeserializeAndLoad);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_Fingerprint);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_ElementType);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_Dimensions);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_UnpaddedDimensions);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_DynamicDimensionIndices);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_GetMemoryLayout);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_OnDeviceSizeInBytes);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_Device);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_Memory);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_Delete);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_IsDeleted);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_CopyToDevice);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_ToHostBuffer);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_IsOnCpu);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_ReadyEvent);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_UnsafePointer);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_IncreaseExternalReferenceCount);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_DecreaseExternalReferenceCount);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_OpaqueDeviceMemoryDataPointer);
_PJRT_API_STRUCT_FIELD(PJRT_CopyToDeviceStream_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_CopyToDeviceStream_AddChunk);
_PJRT_API_STRUCT_FIELD(PJRT_CopyToDeviceStream_TotalBytes);
_PJRT_API_STRUCT_FIELD(PJRT_CopyToDeviceStream_GranuleSize);
_PJRT_API_STRUCT_FIELD(PJRT_CopyToDeviceStream_CurrentBytes);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_Create);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_PlatformName);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_PlatformVersion);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_GetDeviceDescriptions);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_Serialize);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_Attributes);
_PJRT_API_STRUCT_FIELD(PJRT_Compile);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_OutputElementTypes);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_OutputDimensions);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_CopyToMemory);
_PJRT_API_STRUCT_FIELD(PJRT_Client_CreateViewOfDeviceBuffer);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_Fingerprint);
_PJRT_API_STRUCT_FIELD(PJRT_Client_TopologyDescription);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_GetCompiledMemoryStats);
_PJRT_API_STRUCT_FIELD(PJRT_Memory_Kind_Id);
_PJRT_API_STRUCT_FIELD(PJRT_ExecuteContext_Create);
_PJRT_API_STRUCT_FIELD(PJRT_ExecuteContext_Destroy);
} PJRT_Api;
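// Note: the PJRT_Api_STRUCT_SIZE constant below is computed from the
// PJRT_Client_TopologyDescription member, so the members declared after it
// (PJRT_Executable_GetCompiledMemoryStats through PJRT_ExecuteContext_Destroy)
// are not covered by this constant; size checks should rely on the struct_size
// reported at runtime rather than assuming the constant spans the whole table.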
enum {
PJRT_Api_STRUCT_SIZE =
PJRT_STRUCT_SIZE(PJRT_Api, PJRT_Client_TopologyDescription)
};
#undef _PJRT_API_STRUCT_FIELD
#ifdef __cplusplus
}
#endif
#endif | #include "xla/pjrt/c/pjrt_c_api_test.h"
#include <cstddef>
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/client/executable_build_options.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_helpers.h"
#include "xla/pjrt/c/pjrt_c_api_test_base.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace pjrt {
namespace {
constexpr absl::string_view module_add_one =
R"(module {
func.func @main(%arg0: tensor<f32>) -> tensor<f32> {
%0 = "mhlo.copy"(%arg0) : (tensor<f32>) -> tensor<f32>
%1 = mhlo.constant dense<1.000000e+00> : tensor<f32>
%2 = mhlo.add %0, %1 : tensor<f32>
return %2 : tensor<f32>
}})";
constexpr absl::string_view kHloString =
R"(
HloModule TupleCreate_module:
ENTRY %TupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) {
%v1 = f32[] parameter(0)
%v2 = f32[3]{0} parameter(1)
%v3 = f32[2,3]{1,0} parameter(2)
ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3)
}
)";
class TestCApiFactory {
public:
void Register(std::function<const PJRT_Api*()> factory,
absl::string_view platform_name) {
absl::MutexLock lock(&mu_);
CHECK(!factory_);
factory_ = std::move(factory);
CHECK(platform_name_.empty()) << "Platform name already provided";
CHECK(!platform_name.empty()) << "Provided platform name is empty";
platform_name_ = platform_name;
}
std::function<const PJRT_Api*()> Get() const {
absl::MutexLock lock(&mu_);
CHECK(factory_) << "Test didn't call RegisterPjRtCApiTestFactory()";
return factory_;
}
std::string GetPlatformName() const {
absl::MutexLock lock(&mu_);
CHECK(!platform_name_.empty())
<< "Test didn't call RegisterPjRtCApiTestFactory()";
return platform_name_;
}
private:
mutable absl::Mutex mu_;
std::function<const PJRT_Api*()> factory_ ABSL_GUARDED_BY(mu_);
  std::string platform_name_ ABSL_GUARDED_BY(mu_);
};
TestCApiFactory& GetGlobalTestCApiFactory() {
static auto* const factory = new TestCApiFactory;
return *factory;
}
const PJRT_Api* GetCApi() { return GetGlobalTestCApiFactory().Get()(); }
std::string GetPlatformName() {
return GetGlobalTestCApiFactory().GetPlatformName();
}
}
void RegisterPjRtCApiTestFactory(std::function<const PJRT_Api*()> factory,
absl::string_view platform_name) {
GetGlobalTestCApiFactory().Register(std::move(factory), platform_name);
}
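// Illustrative registration sketch (not part of the original file; GetPjrtApi
// and the platform string are placeholder names): a backend-specific test
// binary links this translation unit and, before RUN_ALL_TESTS, calls e.g.
//   pjrt::RegisterPjRtCApiTestFactory([]() { return GetPjrtApi(); }, "cpu");
// so the fixtures below can obtain the PJRT_Api* and platform name under test.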
namespace {
class PjrtCApiTest : public PjrtCApiTestBase {
protected:
PjrtCApiTest() : PjrtCApiTestBase(GetCApi()) {}
std::string platform_name_ = GetPlatformName();
};
TEST_F(PjrtCApiTest, ApiVersion) {
CHECK_EQ(api_->pjrt_api_version.major_version, PJRT_API_MAJOR);
CHECK_EQ(api_->pjrt_api_version.minor_version, PJRT_API_MINOR);
}
TEST_F(PjrtCApiTest, PlatformName) {
PJRT_Client_PlatformName_Args args;
args.client = client_;
args.struct_size = PJRT_Client_PlatformName_Args_STRUCT_SIZE;
args.extension_start = nullptr;
PJRT_Error* error = api_->PJRT_Client_PlatformName(&args);
ASSERT_EQ(error, nullptr);
absl::string_view platform_name(args.platform_name, args.platform_name_size);
ASSERT_EQ(platform_name_, platform_name);
}
TEST_F(PjrtCApiTest, ClientProcessIndex) {
PJRT_Client_ProcessIndex_Args process_index_args =
PJRT_Client_ProcessIndex_Args{
.struct_size = PJRT_Client_ProcessIndex_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.process_index = -1,
};
PJRT_Error* error = api_->PJRT_Client_ProcessIndex(&process_index_args);
CHECK_EQ(error, nullptr);
CHECK_EQ(process_index_args.process_index, 0);
}
TEST_F(PjrtCApiTest, ClientDevices) {
absl::Span<PJRT_Device* const> devices = GetClientDevices();
ASSERT_FALSE(devices.empty());
for (auto& device : devices) {
ASSERT_TRUE(this->IsValidDeviceId(device));
}
}
TEST_F(PjrtCApiTest, ClientAddressableDevices) {
absl::Span<PJRT_Device* const> addressable_devices =
GetClientAddressableDevices();
ASSERT_FALSE(addressable_devices.empty());
for (auto& device : addressable_devices) {
ASSERT_TRUE(this->IsValidDeviceId(device));
}
absl::Span<PJRT_Device* const> client_devices = GetClientDevices();
for (auto& addressable_device : addressable_devices) {
ASSERT_THAT(client_devices, ::testing::Contains(addressable_device));
}
}
TEST_F(PjrtCApiTest, LookupDevice) {
PJRT_Client_LookupDevice_Args lookup_device_args =
PJRT_Client_LookupDevice_Args{
.struct_size = PJRT_Client_LookupDevice_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.id = 0,
.device = nullptr,
};
PJRT_Error* lookup_device_error =
api_->PJRT_Client_LookupDevice(&lookup_device_args);
ASSERT_EQ(lookup_device_error, nullptr);
int id = GetDeviceId(lookup_device_args.device);
ASSERT_EQ(id, 0);
}
TEST_F(PjrtCApiTest, LookupAddressableDevice) {
PJRT_Client_LookupAddressableDevice_Args lookup_addressable_device_args =
PJRT_Client_LookupAddressableDevice_Args{
.struct_size = PJRT_Client_LookupAddressableDevice_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.local_hardware_id = 0,
.addressable_device = nullptr,
};
PJRT_Error* lookup_addressable_device_error =
api_->PJRT_Client_LookupAddressableDevice(
&lookup_addressable_device_args);
ASSERT_EQ(lookup_addressable_device_error, nullptr);
int local_hardware_id =
GetLocalHardwareId(lookup_addressable_device_args.addressable_device);
ASSERT_EQ(local_hardware_id, 0);
}
TEST_F(PjrtCApiTest, GetDefaultDeviceAssignmentNominal) {
constexpr int kNumReplicas = 2;
constexpr int kNumPartitions = 1;
std::vector<int> assignment_buffer(kNumReplicas * kNumPartitions);
PJRT_Client_DefaultDeviceAssignment_Args args{
.struct_size = PJRT_Client_DefaultDeviceAssignment_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.num_replicas = kNumReplicas,
.num_partitions = kNumPartitions,
.default_assignment_size = assignment_buffer.size(),
.default_assignment = assignment_buffer.data(),
};
auto error = ToUniquePtr(api_->PJRT_Client_DefaultDeviceAssignment(&args));
EXPECT_EQ(error, nullptr);
}
TEST_F(PjrtCApiTest, GetDefaultDeviceAssignmentBufferTooSmall) {
constexpr int kNumReplicas = 4;
constexpr int kNumPartitions = 2;
constexpr size_t kBufferSize = 7;
std::vector<int> assignment_buffer(kBufferSize);
PJRT_Client_DefaultDeviceAssignment_Args args{
.struct_size = PJRT_Client_DefaultDeviceAssignment_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.num_replicas = kNumReplicas,
.num_partitions = kNumPartitions,
.default_assignment_size = assignment_buffer.size(),
.default_assignment = assignment_buffer.data(),
};
auto error = ToUniquePtr(api_->PJRT_Client_DefaultDeviceAssignment(&args));
ASSERT_NE(error, nullptr);
absl::Status status = ::pjrt::PjrtErrorToStatus(error.get(), api_);
EXPECT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);
EXPECT_EQ(status.message(),
"PJRT_Client_DefaultDeviceAssignment: `default_assignment_size` 7"
" < `num_replicas * num_partitions`, 4 * 2 = 8");
}
TEST_F(PjrtCApiTest, LookupDeviceNegativeId) {
PJRT_Client_LookupDevice_Args args = PJRT_Client_LookupDevice_Args{
.struct_size = PJRT_Client_LookupDevice_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.id = -1,
.device = nullptr,
};
absl::Status expected =
absl::Status(absl::StatusCode::kInvalidArgument,
"No matching device found for device_id -1");
auto error = ToUniquePtr(api_->PJRT_Client_LookupDevice(&args));
ASSERT_NE(error, nullptr);
absl::Status status = ::pjrt::PjrtErrorToStatus(error.get(), api_);
ASSERT_EQ(status, expected);
}
TEST_F(PjrtCApiTest, LookupDeviceOutOfRangeId) {
int out_of_range_id = GetNumDevices();
PJRT_Client_LookupDevice_Args args = PJRT_Client_LookupDevice_Args{
.struct_size = PJRT_Client_LookupDevice_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.id = out_of_range_id,
.device = nullptr,
};
absl::Status expected = absl::Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("No matching device found for device_id ", out_of_range_id));
auto error = ToUniquePtr(api_->PJRT_Client_LookupDevice(&args));
ASSERT_NE(error, nullptr);
absl::Status status = ::pjrt::PjrtErrorToStatus(error.get(), api_);
ASSERT_EQ(status, expected);
}
static constexpr std::string_view kExecutableName = "operation";
void destroy_executable(PJRT_LoadedExecutable* executable,
const PJRT_Api* api) {
PJRT_LoadedExecutable_Destroy_Args args{
.struct_size = PJRT_LoadedExecutable_Destroy_Args_STRUCT_SIZE,
.extension_start = nullptr,
.executable = executable,
};
PJRT_Error* error = api->PJRT_LoadedExecutable_Destroy(&args);
CHECK_EQ(error, nullptr);
}
TEST_F(PjrtCApiTest, BufferTransferImmutableUntilTransferCompletes) {
xla::Shape shape = xla::ShapeUtil::MakeShapeWithType<float>({4});
std::vector<float> float_data(4);
std::iota(float_data.begin(), float_data.end(), 41.0f);
PJRT_Client_BufferFromHostBuffer_Args args = CreateBufferFromHostBufferArgs(
float_data, shape,
xla::PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes);
PJRT_Error* error = api_->PJRT_Client_BufferFromHostBuffer(&args);
CHECK_EQ(error, nullptr);
std::unique_ptr<PJRT_Buffer, ::pjrt::PJRT_BufferDeleter> buffer(
args.buffer, ::pjrt::MakeBufferDeleter(api_));
std::unique_ptr<PJRT_Event, ::pjrt::PJRT_EventDeleter> event(
args.done_with_host_buffer, ::pjrt::MakeEventDeleter(api_));
PJRT_Event_Await_Args await_args;
await_args.struct_size = PJRT_Event_Await_Args_STRUCT_SIZE;
await_args.extension_start = nullptr;
await_args.event = event.get();
PJRT_Error* event_error = api_->PJRT_Event_Await(&await_args);
ASSERT_EQ(event_error, nullptr);
}
TEST_F(PjrtCApiTest, Compile) {
PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{
.struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
};
std::string options_str = BuildSingleDeviceCompileOptionStr();
args.compile_options = options_str.c_str();
args.compile_options_size = options_str.size();
std::string format(::pjrt::kMlirFormat);
std::string program_code{module_add_one};
PJRT_Program program = PJRT_Program{
.struct_size = PJRT_Program_STRUCT_SIZE,
.extension_start = nullptr,
.code = program_code.data(),
.code_size = program_code.length(),
.format = format.c_str(),
.format_size = format.size(),
};
args.program = &program;
PJRT_Error* error = api_->PJRT_Client_Compile(&args);
::pjrt::LogFatalIfPjrtError(error, api_);
ASSERT_EQ(error, nullptr);
destroy_executable(args.executable, api_);
}
TEST_F(PjrtCApiTest, CompileXlaComputation) {
PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{
.struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
};
xla::DeviceAssignment device_assignment(1, 1);
device_assignment(0, 0) = 0;
xla::DeviceAssignmentProto proto;
device_assignment.Serialize(&proto);
std::string device_assignment_str = proto.SerializeAsString();
std::string options_str = BuildSingleDeviceCompileOptionStr();
args.compile_options = options_str.c_str();
args.compile_options_size = options_str.size();
absl::StatusOr<std::unique_ptr<xla::HloModule>> hlo_module =
xla::ParseAndReturnUnverifiedModule(kHloString);
ASSERT_EQ(hlo_module.ok(), true);
std::string module_str = hlo_module->get()->ToProto().SerializeAsString();
std::string format(::pjrt::kHloFormat);
PJRT_Program program = PJRT_Program{
.struct_size = PJRT_Program_STRUCT_SIZE,
.extension_start = nullptr,
.code = module_str.data(),
.code_size = module_str.size(),
.format = format.c_str(),
.format_size = format.size(),
};
args.program = &program;
PJRT_Error* error = api_->PJRT_Client_Compile(&args);
::pjrt::LogFatalIfPjrtError(error, api_);
ASSERT_EQ(error, nullptr);
destroy_executable(args.executable, api_);
}
TEST_F(PjrtCApiTest, CompileInvalidOption) {
PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{
.struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
};
std::string options_str = "invalid compile options";
args.compile_options = options_str.c_str();
args.compile_options_size = options_str.size();
std::string format(::pjrt::kMlirFormat);
std::string program_code{module_add_one};
PJRT_Program program = PJRT_Program{
.struct_size = PJRT_Program_STRUCT_SIZE,
.extension_start = nullptr,
.code = program_code.data(),
.code_size = program_code.length(),
.format = format.c_str(),
.format_size = format.size(),
};
args.program = &program;
PJRT_Error* error = api_->PJRT_Client_Compile(&args);
absl::Status status = ::pjrt::PjrtErrorToStatus(error, api_);
EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
EXPECT_EQ(status.message(),
"PJRT_Client_Compile: failed to deserialize CompileOptionsProto");
destroy_executable(args.executable, api_);
::pjrt::MakeErrorDeleter(api_)(error);
}
TEST_F(PjrtCApiTest, CompileInvalidProgramFormat) {
PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{
.struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
};
xla::DeviceAssignment device_assignment(1, 1);
device_assignment(0, 0) = 0;
xla::DeviceAssignmentProto proto;
device_assignment.Serialize(&proto);
std::string device_assignment_str = proto.SerializeAsString();
std::string options_str = BuildSingleDeviceCompileOptionStr();
args.compile_options = options_str.c_str();
args.compile_options_size = options_str.size();
std::string format("invalid");
PJRT_Program program = PJRT_Program{
.struct_size = PJRT_Program_STRUCT_SIZE,
.extension_start = nullptr,
.code = nullptr,
.code_size = 0,
.format = format.c_str(),
.format_size = format.size(),
};
args.program = &program;
PJRT_Error* error = api_->PJRT_Client_Compile(&args);
absl::Status status = ::pjrt::PjrtErrorToStatus(error, api_);
EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
EXPECT_EQ(status.message(), "Unknown program format 'invalid'.");
destroy_executable(args.executable, api_);
::pjrt::MakeErrorDeleter(api_)(error);
}
TEST_F(PjrtCApiTest, DeviceId) {
auto* device = GetClientDevices()[0];
int id = GetDeviceId(device);
CHECK_EQ(id, 0);
}
TEST_F(PjrtCApiTest, DeviceProcessIndex) {
PJRT_DeviceDescription_ProcessIndex_Args args =
PJRT_DeviceDescription_ProcessIndex_Args{
.struct_size = PJRT_DeviceDescription_ProcessIndex_Args_STRUCT_SIZE,
.extension_start = nullptr,
.device_description =
::pjrt::GetDeviceDescription(api_, GetClientDevices()[0]),
.process_index = -1,
};
PJRT_Error* error = api_->PJRT_DeviceDescription_ProcessIndex(&args);
ASSERT_EQ(error, nullptr);
CHECK_EQ(args.process_index, 0);
}
TEST_F(PjrtCApiTest, DeviceIsAddressable) {
PJRT_Device_IsAddressable_Args args = PJRT_Device_IsAddressable_Args{
.struct_size = PJRT_Device_IsAddressable_Args_STRUCT_SIZE,
.extension_start = nullptr,
.device = GetClientDevices()[0],
.is_addressable = false,
};
PJRT_Error* error = api_->PJRT_Device_IsAddressable(&args);
ASSERT_EQ(error, nullptr);
CHECK_EQ(args.is_addressable, true);
}
TEST_F(PjrtCApiTest, DeviceLocalHardwareId) {
PJRT_Device_LocalHardwareId_Args args = PJRT_Device_LocalHardwareId_Args{
.struct_size = PJRT_Device_LocalHardwareId_Args_STRUCT_SIZE,
.extension_start = nullptr,
.device = GetClientDevices()[0],
.local_hardware_id = -1,
};
PJRT_Error* error = api_->PJRT_Device_LocalHardwareId(&args);
ASSERT_EQ(error, nullptr);
CHECK_EQ(args.local_hardware_id, 0);
}
class PjrtCApiBufferTest : public PjrtCApiTest {
protected:
void SetUp() override {
PjrtCApiTest::SetUp();
auto buffer_and_event = create_buffer();
buffer_ = std::move(buffer_and_event.first);
event_ = buffer_and_event.second;
}
void TearDown() override {
TF_CHECK_OK(event_.Await());
buffer_.reset(nullptr);
PjrtCApiTest::TearDown();
}
std::unique_ptr<PJRT_Buffer, ::pjrt::PJRT_BufferDeleter> buffer_;
xla::PjRtFuture<> event_;
};
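// The fixture above creates one device buffer per test via create_buffer()
// (provided by the shared test base) and waits on its transfer event in
// TearDown so the buffer can be destroyed safely.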
TEST_F(PjrtCApiBufferTest, IsDeleted) {
PJRT_Buffer_IsDeleted_Args is_deleted_args;
is_deleted_args.struct_size = PJRT_Buffer_IsDeleted_Args_STRUCT_SIZE;
is_deleted_args.extension_start = nullptr;
is_deleted_args.buffer = buffer_.get();
PJRT_Error* is_deleted_error = api_->PJRT_Buffer_IsDeleted(&is_deleted_args);
ASSERT_EQ(is_deleted_error, nullptr);
ASSERT_FALSE(is_deleted_args.is_deleted);
PJRT_Buffer_Delete_Args delete_args;
delete_args.struct_size = PJRT_Buffer_Delete_Args_STRUCT_SIZE;
delete_args.extension_start = nullptr;
delete_args.buffer = buffer_.get();
PJRT_Error* delete_error = api_->PJRT_Buffer_Delete(&delete_args);
ASSERT_EQ(delete_error, nullptr);
is_deleted_error = api_->PJRT_Buffer_IsDeleted(&is_deleted_args);
ASSERT_EQ(is_deleted_error, nullptr);
ASSERT_TRUE(is_deleted_args.is_deleted);
}
TEST_F(PjrtCApiBufferTest, GetOnDeviceSizeInBytes) {
PJRT_Buffer_OnDeviceSizeInBytes_Args args;
args.struct_size = PJRT_Buffer_OnDeviceSizeInBytes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
PJRT_Error* on_device_size_bytes_error =
api_->PJRT_Buffer_OnDeviceSizeInBytes(&args);
ASSERT_EQ(on_device_size_bytes_error, nullptr);
ASSERT_GT(args.on_device_size_in_bytes, 0);
}
TEST_F(PjrtCApiBufferTest, ReadyEvent) {
PJRT_Buffer_ReadyEvent_Args get_event_args;
get_event_args.struct_size = PJRT_Buffer_ReadyEvent_Args_STRUCT_SIZE;
get_event_args.extension_start = nullptr;
get_event_args.buffer = buffer_.get();
auto error = ToUniquePtr(api_->PJRT_Buffer_ReadyEvent(&get_event_args));
ASSERT_EQ(error, nullptr);
PJRT_Event* event = get_event_args.event;
ASSERT_NE(event, nullptr);
PJRT_Event_Await_Args await_args;
await_args.struct_size = PJRT_Event_Await_Args_STRUCT_SIZE;
await_args.extension_start = nullptr;
await_args.event = event;
error.reset(api_->PJRT_Event_Await(&await_args));
ASSERT_EQ(error, nullptr);
PJRT_Event_IsReady_Args ready_args;
ready_args.struct_size = PJRT_Event_IsReady_Args_STRUCT_SIZE;
ready_args.extension_start = nullptr;
ready_args.event = event;
error.reset(api_->PJRT_Event_IsReady(&ready_args));
ASSERT_EQ(error, nullptr);
EXPECT_TRUE(ready_args.is_ready);
PJRT_Event_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Event_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.event = event;
error.reset(api_->PJRT_Event_Destroy(&destroy_args));
EXPECT_EQ(error, nullptr);
}
TEST_F(PjrtCApiBufferTest, ToHostBufferNoHostLayout) {
PJRT_Buffer_ToHostBuffer_Args args;
args.struct_size = PJRT_Buffer_ToHostBuffer_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.src = buffer_.get();
xla::Shape host_shape = xla::ShapeUtil::MakeShape(xla::F32, {4});
auto literal = std::make_shared<xla::Literal>(host_shape);
args.host_layout = nullptr;
args.dst = literal->untyped_data();
args.dst_size = xla::ShapeUtil::ByteSizeOfElements(host_shape);
args.event = nullptr;
PJRT_Error* error = api_->PJRT_Buffer_ToHostBuffer(&args);
xla::PjRtFuture<> transfer_to_host =
::pjrt::ConvertCEventToCppFuture(args.event, api_);
TF_CHECK_OK(transfer_to_host.Await());
EXPECT_EQ(error, nullptr);
ASSERT_EQ(literal->data<float>().size(), 4);
std::vector<float> float_data(4);
std::iota(float_data.begin(), float_data.end(), 41.0f);
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
xla::LiteralUtil::CreateR1<float>(float_data), *literal));
}
TEST_F(PjrtCApiBufferTest, IncreaseAndDecreaseReferenceCount) {
PJRT_Buffer_IncreaseExternalReferenceCount_Args increase_reference_count_args;
increase_reference_count_args.struct_size =
PJRT_Buffer_IncreaseExternalReferenceCount_Args_STRUCT_SIZE;
increase_reference_count_args.extension_start = nullptr;
increase_reference_count_args.buffer = buffer_.get();
PJRT_Error* increase_reference_count_error =
api_->PJRT_Buffer_IncreaseExternalReferenceCount(
&increase_reference_count_args);
EXPECT_EQ(increase_reference_count_error, nullptr);
PJRT_Buffer_DecreaseExternalReferenceCount_Args decrease_reference_count_args;
decrease_reference_count_args.struct_size =
PJRT_Buffer_DecreaseExternalReferenceCount_Args_STRUCT_SIZE;
decrease_reference_count_args.extension_start = nullptr;
decrease_reference_count_args.buffer = buffer_.get();
PJRT_Error* decrease_reference_error =
api_->PJRT_Buffer_DecreaseExternalReferenceCount(
&decrease_reference_count_args);
EXPECT_EQ(decrease_reference_error, nullptr);
}
TEST_F(PjrtCApiBufferTest, DecreaseReferenceCountReturnsError) {
PJRT_Buffer_DecreaseExternalReferenceCount_Args args;
args.struct_size =
PJRT_Buffer_DecreaseExternalReferenceCount_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
auto error =
ToUniquePtr(api_->PJRT_Buffer_DecreaseExternalReferenceCount(&args));
ASSERT_NE(error, nullptr);
absl::Status status = ::pjrt::PjrtErrorToStatus(error.get(), api_);
EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
EXPECT_EQ(status.message(),
"Attempting to decrease reference on a buffer with zero reference "
"count.");
}
TEST_F(PjrtCApiBufferTest, OpaqueDeviceMemoryDataPointer) {
PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args args;
args.struct_size = PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
PJRT_Error* error = api_->PJRT_Buffer_OpaqueDeviceMemoryDataPointer(&args);
EXPECT_EQ(error, nullptr);
EXPECT_NE(args.device_memory_ptr, nullptr);
}
class PjrtCommonCApiHelpersTest : public PjrtCApiTest {};
TEST_F(PjrtCommonCApiHelpersTest, PjrtErrorToStatus) {
EXPECT_TRUE(::pjrt::PjrtErrorToStatus(nullptr, api_).ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
855867c7-e9c3-4aca-9d4d-c16ccd2ae5a3 | cpp | tensorflow/tensorflow | future | tensorflow/core/tfrt/mlrt/interpreter/future.h | tensorflow/core/tfrt/mlrt/interpreter/future_test.cc | #ifndef TENSORFLOW_CORE_TFRT_MLRT_INTERPRETER_FUTURE_H_
#define TENSORFLOW_CORE_TFRT_MLRT_INTERPRETER_FUTURE_H_
#include <atomic>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/check.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tfrt/concurrency/async_value.h"
#include "tfrt/concurrency/async_value_ref.h"
namespace mlrt {
namespace future_internal {
void GetArgumentType(void (*)());
template <typename F>
void GetArgumentType(void (F::*)());
template <typename F>
void GetArgumentType(void (F::*)() const);
template <typename Arg>
Arg GetArgumentType(void (*)(Arg));
template <typename F, typename Arg>
Arg GetArgumentType(void (F::*)(Arg));
template <typename F, typename Arg>
Arg GetArgumentType(void (F::*)(Arg) const);
template <typename F>
decltype(GetArgumentType(&F::operator())) GetArgumentType(F);
template <typename F>
using ArgumentType = decltype(GetArgumentType(std::declval<F>()));
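// ArgumentType<F> is the single parameter type of F's call operator, or void
// for a nullary callable; e.g. for a lambda `[](absl::Status s) {}` it deduces
// absl::Status.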
template <typename T>
struct ArgTag {};
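// The InvokeThen overloads below dispatch on the callback's parameter type: a
// plain T receives the stored value (moved when the shared state is uniquely
// referenced, otherwise passed by reference), absl::Status receives only the
// error or OK state, and absl::StatusOr<T> receives either the error or the
// value.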
template <typename F, typename T>
ABSL_ATTRIBUTE_ALWAYS_INLINE void InvokeThen(F&& then,
tsl::AsyncValue* shared_state,
ArgTag<T>) {
auto& arg = shared_state->get<T>();
if (shared_state->IsUnique()) {
std::forward<F>(then)(std::move(arg));
} else {
std::forward<F>(then)(arg);
}
}
template <typename F>
ABSL_ATTRIBUTE_ALWAYS_INLINE void InvokeThen(F&& then,
tsl::AsyncValue* shared_state,
ArgTag<absl::Status>) {
if (shared_state->IsError()) {
std::forward<F>(then)(shared_state->GetError());
} else {
std::forward<F>(then)(absl::OkStatus());
}
}
template <typename F, typename T>
ABSL_ATTRIBUTE_ALWAYS_INLINE void InvokeThen(F&& then,
tsl::AsyncValue* shared_state,
ArgTag<absl::StatusOr<T>>) {
if (shared_state->IsError()) {
std::forward<F>(then)(shared_state->GetError());
} else {
InvokeThen(std::forward<F>(then), shared_state, ArgTag<T>());
}
}
}
struct Control {};
class Future {
public:
template <typename T>
explicit Future(tsl::AsyncValueRef<T> async_value)
: shared_state_(std::move(async_value)) {}
Future(const Future& other) = default;
Future& operator=(const Future& other) = default;
Future(Future&& other) = default;
Future& operator=(Future&& other) = default;
explicit operator bool() const { return shared_state_ != nullptr; }
bool IsReady() const {
DCHECK(shared_state_);
return shared_state_->IsAvailable();
}
bool IsError() const {
DCHECK(shared_state_);
return shared_state_->IsError();
}
template <typename T>
const T& Get() const {
DCHECK(shared_state_);
return shared_state_->get<T>();
}
const absl::Status& GetError() const {
DCHECK(shared_state_);
return shared_state_->GetError();
}
template <typename F,
typename Arg = std::decay_t<future_internal::ArgumentType<F>>>
typename std::enable_if_t<!std::is_void_v<Arg>, void> Then(F then) && {
DCHECK(shared_state_);
auto* shared_state_ptr = shared_state_.get();
shared_state_ptr->AndThen([shared_state = std::move(shared_state_),
then = std::move(then)]() mutable {
future_internal::InvokeThen(std::move(then), shared_state.get(),
future_internal::ArgTag<Arg>());
});
}
template <typename F,
typename Arg = std::decay_t<future_internal::ArgumentType<F>>>
typename std::enable_if_t<std::is_void_v<Arg>, void> Then(F then) && {
DCHECK(shared_state_);
auto* shared_state_ptr = shared_state_.get();
shared_state_ptr->AndThen(
[shared_state = std::move(shared_state_),
then = std::move(then)]() mutable { std::move(then)(); });
}
size_t UseCount() const {
DCHECK(shared_state_);
return shared_state_->NumRef();
}
private:
friend class Promise;
explicit Future(tsl::RCReference<tsl::AsyncValue> shared_state)
: shared_state_(std::move(shared_state)) {}
tsl::RCReference<tsl::AsyncValue> shared_state_;
};
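// Write-side counterpart of Future. A non-empty promise must be fulfilled
// exactly once, either with Set<T>() or SetError(), before it is destroyed.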
class Promise {
public:
template <typename T>
static Promise Allocate() {
return Promise(tsl::MakeUnconstructedAsyncValueRef<T>().ReleaseRCRef());
}
~Promise() {
DCHECK(!shared_state_ || shared_state_->IsAvailable())
<< "A non-empty promise must be fulfilled.";
}
Promise(const Promise&) = delete;
Promise& operator=(const Promise&) = delete;
Promise(Promise&&) = default;
Promise& operator=(Promise&&) = default;
Future GetFuture() const { return Future(shared_state_); }
template <typename T, typename... Args>
void Set(Args&&... args) && {
DCHECK(shared_state_);
auto shared_state = std::move(shared_state_);
auto* shared_state_ptr = shared_state.get();
if (!shared_state->IsUnique()) {
shared_state.reset();
}
shared_state_ptr->emplace<T>(std::forward<Args>(args)...);
}
void SetError(absl::Status status) && {
DCHECK(shared_state_);
DCHECK(!status.ok());
shared_state_->SetError(std::move(status));
shared_state_.reset();
}
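// If this promise is still pending, fails it with the (non-OK) status
// recorded in the ExecutionContext carried by `arg`.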
void HandleError(Value* arg) && {
if (!shared_state_ || shared_state_->IsAvailable()) {
return;
}
auto& execution_context = *arg->Get<ExecutionContext*>();
DCHECK(!execution_context.status().ok());
std::move(*this).SetError(execution_context.status());
}
explicit operator bool() const { return shared_state_ != nullptr; }
private:
explicit Promise(tsl::RCReference<tsl::AsyncValue> shared_state)
: shared_state_(std::move(shared_state)) {}
tsl::RCReference<tsl::AsyncValue> shared_state_;
};
namespace future_internal {
struct State {
State(int size, mlrt::Promise promise)
: count(size), promise(std::move(promise)) {}
std::atomic<int> count;
mlrt::Promise promise;
absl::Mutex mu;
absl::Status status;
void SetError(absl::Status status) {
absl::MutexLock lock(&mu);
this->status = std::move(status);
}
bool DecrementCount() {
if (count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
if (status.ok()) {
std::move(promise).Set<Control>(Control());
} else {
std::move(promise).SetError(std::move(status));
}
return true;
}
return false;
}
};
}
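// Returns a future that becomes ready once every input future has completed.
// Each successful value is stored into the matching entry of `results`; if
// any input fails, the returned future carries the recorded error instead.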
template <typename T, typename FutureLikeContainer, typename ResultRefContainer>
ABSL_ATTRIBUTE_ALWAYS_INLINE Future AwaitAll(FutureLikeContainer futures,
ResultRefContainer results) {
DCHECK(!futures.empty());
auto promise = Promise::Allocate<Control>();
auto await_all = promise.GetFuture();
auto* state = new future_internal::State(futures.size(), std::move(promise));
DCHECK_EQ(futures.size(), results.size());
for (int i = 0; i < futures.size(); ++i) {
auto& future = futures[i];
std::move(future).Then(
[state, result = &results[i]](absl::StatusOr<T> value) {
if (value.ok()) {
result->Set(std::move(*value));
} else {
state->SetError(std::move(value).status());
}
if (state->DecrementCount()) {
delete state;
}
});
}
return await_all;
}
template <typename FutureLikeContainer>
ABSL_ATTRIBUTE_ALWAYS_INLINE Future AwaitAll(FutureLikeContainer futures) {
DCHECK(!futures.empty());
auto promise = Promise::Allocate<Control>();
auto await_all = promise.GetFuture();
auto* state = new future_internal::State(futures.size(), std::move(promise));
for (int i = 0; i < futures.size(); ++i) {
auto& future = futures[i];
std::move(future).Then([state](absl::Status status) {
if (!status.ok()) {
state->SetError(std::move(status));
}
if (state->DecrementCount()) {
delete state;
}
});
}
return await_all;
}
}
#endif | #include "tensorflow/core/tfrt/mlrt/interpreter/future.h"
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/status_matchers.h"
namespace mlrt {
namespace {
TEST(FutureTest, Basic) {
{
auto promise = Promise::Allocate<int>();
auto future = promise.GetFuture();
std::move(promise).Set<int>(1);
EXPECT_FALSE(promise);
ASSERT_TRUE(future);
ASSERT_TRUE(future.IsReady());
EXPECT_EQ(future.Get<int>(), 1);
}
{
auto promise = Promise::Allocate<int>();
auto future = promise.GetFuture();
int u = 0;
ASSERT_TRUE(future);
std::move(future).Then(
[&](absl::StatusOr<int> result) { u = result.value(); });
EXPECT_FALSE(future);
EXPECT_EQ(u, 0);
std::move(promise).Set<int>(1);
EXPECT_EQ(u, 1);
}
{
auto promise = Promise::Allocate<int>();
auto future = promise.GetFuture();
int v = 0;
ASSERT_TRUE(future);
std::move(future).Then([&](int result) { v = result; });
EXPECT_FALSE(future);
EXPECT_EQ(v, 0);
std::move(promise).Set<int>(1);
EXPECT_EQ(v, 1);
}
{
auto promise = Promise::Allocate<int>();
auto future = promise.GetFuture();
int w = 0;
ASSERT_TRUE(future);
std::move(future).Then([&]() { w = 2; });
EXPECT_FALSE(future);
EXPECT_EQ(w, 0);
std::move(promise).Set<int>(1);
EXPECT_EQ(w, 2);
}
{
auto promise = Promise::Allocate<int>();
auto future = promise.GetFuture();
absl::Status s = absl::InternalError("error");
ASSERT_TRUE(future);
std::move(future).Then([&](absl::Status status) { s = status; });
EXPECT_FALSE(future);
EXPECT_FALSE(s.ok());
std::move(promise).Set<int>(1);
EXPECT_TRUE(s.ok());
}
}
TEST(FutureTest, CopyAndMove) {
auto promise = Promise::Allocate<int>();
auto future = promise.GetFuture();
EXPECT_EQ(future.UseCount(), 2);
{
auto copy = future;
EXPECT_EQ(copy.UseCount(), 3);
}
auto move = std::move(future);
EXPECT_EQ(move.UseCount(), 2);
std::move(promise).Set<int>(1);
EXPECT_EQ(move.UseCount(), 1);
}
TEST(FutureTest, CreateFromAsyncValue) {
auto promise = tsl::MakeUnconstructedAsyncValueRef<int>();
mlrt::Future future(promise);
int v = 0;
std::move(future).Then([&](int result) { v = result; });
EXPECT_EQ(v, 0);
promise.emplace(1);
EXPECT_EQ(v, 1);
}
TEST(FutureTest, Error) {
{
auto promise = Promise::Allocate<int>();
auto future = promise.GetFuture();
std::move(promise).SetError(absl::InternalError("test error"));
ASSERT_TRUE(future.IsError());
EXPECT_THAT(
future.GetError(),
::tsl::testing::StatusIs(absl::StatusCode::kInternal, "test error"));
}
{
auto promise = Promise::Allocate<int>();
auto future = promise.GetFuture();
absl::StatusOr<int> r;
std::move(future).Then(
[&](absl::StatusOr<int> result) { r = std::move(result); });
std::move(promise).SetError(absl::InternalError("test error"));
EXPECT_THAT(
r, ::tsl::testing::StatusIs(absl::StatusCode::kInternal, "test error"));
}
{
auto promise = Promise::Allocate<int>();
auto future = promise.GetFuture();
absl::Status s;
std::move(future).Then([&](absl::Status status) { s = std::move(status); });
std::move(promise).SetError(absl::InternalError("test error"));
EXPECT_THAT(
s, ::tsl::testing::StatusIs(absl::StatusCode::kInternal, "test error"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/interpreter/future.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/interpreter/future_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f9ae76a2-0074-419d-8e7e-40f7fcdfd6d3 | cpp | tensorflow/tensorflow | real_time_in_memory_metric | third_party/xla/xla/tsl/framework/real_time_in_memory_metric.h | third_party/xla/xla/tsl/framework/real_time_in_memory_metric_test.cc | #ifndef XLA_TSL_FRAMEWORK_REAL_TIME_IN_MEMORY_METRIC_H_
#define XLA_TSL_FRAMEWORK_REAL_TIME_IN_MEMORY_METRIC_H_
#include <atomic>
#include <type_traits>
namespace tsl {
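// Process-wide gauge holding a single arithmetic value. Set() and Get() use
// relaxed atomics, so concurrent readers may see a slightly stale value but
// never a torn one; no synchronization with other memory is implied.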
template <typename T>
class RealTimeInMemoryMetric {
public:
RealTimeInMemoryMetric() : value_(T{0}) {}
T Get() const { return value_.load(std::memory_order_relaxed); }
void Set(T new_value) { value_.store(new_value, std::memory_order_relaxed); }
RealTimeInMemoryMetric(const RealTimeInMemoryMetric&) = delete;
RealTimeInMemoryMetric& operator=(const RealTimeInMemoryMetric&) = delete;
RealTimeInMemoryMetric(RealTimeInMemoryMetric&&) = delete;
RealTimeInMemoryMetric& operator=(RealTimeInMemoryMetric&&) = delete;
static_assert(std::is_arithmetic_v<T>);
private:
std::atomic<T> value_;
};
}
#endif | #include "xla/tsl/framework/real_time_in_memory_metric.h"
#include <cstdint>
#include "tsl/platform/test.h"
namespace tsl {
namespace {
TEST(RealTimeInMemoryMetric, SetAndGet) {
RealTimeInMemoryMetric<int64_t> m;
EXPECT_EQ(m.Get(), 0);
m.Set(100);
EXPECT_EQ(m.Get(), 100);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/real_time_in_memory_metric.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/real_time_in_memory_metric_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6179176a-3ede-48e4-aaad-e68588ded661 | cpp | tensorflow/tensorflow | eigen_spatial_convolutions | tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h | third_party/xla/xla/tsl/framework/convolution/eigen_spatial_convolutions_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_SPATIAL_CONVOLUTIONS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_SPATIAL_CONVOLUTIONS_H_
#define EIGEN_USE_CUSTOM_THREAD_POOL
#define EIGEN_USE_THREADS
#define Eigen EigenForTFLite
#define TFLITE_REDUCE_INSTANTIATIONS
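// With TFLITE_REDUCE_INSTANTIATIONS defined, the dispatch macro below only
// instantiates the contiguous, non-reordered contraction path; any other
// tensor format hits the eigen_assert instead. This keeps binary size small
// for TFLite builds.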
#if defined(TFLITE_REDUCE_INSTANTIATIONS)
#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
if (this->m_lhs_inner_dim_contiguous && this->m_rhs_inner_dim_contiguous && \
!this->m_rhs_inner_dim_reordered) { \
METHOD<true, true, false, ALIGNMENT> ARGS; \
} else { \
eigen_assert(false && "Unsupported contraction formats"); \
}
#endif
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/tsl/framework/convolution/eigen_spatial_convolutions-inl.h"
#endif | #include "xla/tsl/framework/convolution/eigen_spatial_convolutions.h"
#include "absl/strings/str_cat.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace Eigen {
#define EigenApprox(a, b) \
{ ASSERT_TRUE(std::abs(a - b) <= std::min(std::abs(a), std::abs(b)) * 1e-3); }
static int ceil_div(int a, int b) { return (a + b - 1) / b; }
TEST(EigenSpatialConvolutionsTest, Simple) {
const int input_depth = 7;
const int input_rows = 4;
const int input_cols = 5;
const int output_depth = 10;
const int patch_rows = 3;
const int patch_cols = 4;
const int output_rows = input_rows;
const int output_cols = input_cols;
Tensor<float, 3> input(input_depth, input_rows, input_cols);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 3> result(output_depth, output_rows, output_cols);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
result = SpatialConvolution(input, kernel);
EXPECT_EQ(result.dimension(0), output_depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
if (r - 1 + i >= 0 && c - 1 + j >= 0 && r - 1 + i < output_rows &&
c - 1 + j < output_cols) {
expected +=
input(id, r - 1 + i, c - 1 + j) * kernel(od, id, r, c);
}
}
}
}
EigenApprox(result(od, i, j), expected);
}
}
}
}
TEST(EigenSpatialConvolutionsTest, SimpleRowMajor) {
const int input_depth = 7;
const int input_rows = 4;
const int input_cols = 5;
const int output_depth = 10;
const int patch_rows = 3;
const int patch_cols = 4;
const int output_rows = input_rows;
const int output_cols = input_cols;
Tensor<float, 3, RowMajor> input(input_cols, input_rows, input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 3, RowMajor> result(output_cols, output_rows, output_depth);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
result = SpatialConvolution(input, kernel);
EXPECT_EQ(result.dimension(0), output_cols);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_depth);
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
if (r - 1 + i >= 0 && c - 1 + j >= 0 && r - 1 + i < output_rows &&
c - 1 + j < output_cols) {
expected +=
input(c - 1 + j, r - 1 + i, id) * kernel(c, r, id, od);
}
}
}
}
EigenApprox(result(j, i, od), expected);
}
}
}
}
TEST(EigenSpatialConvolutionsTest, BatchedSpatialConvolution) {
Tensor<float, 4> input(10, 5, 5, 13);
Tensor<float, 4> kernel(7, 10, 3, 3);
Tensor<float, 4> result(7, 5, 5, 13);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
result = SpatialConvolution(input, kernel);
EXPECT_EQ(result.dimension(0), 7);
EXPECT_EQ(result.dimension(1), 5);
EXPECT_EQ(result.dimension(2), 5);
for (int b = 0; b < 13; ++b) {
for (int od = 0; od < 7; ++od) {
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 5; ++j) {
float expected = 0.0f;
for (int c = 0; c < 3; ++c) {
for (int r = 0; r < 3; ++r) {
for (int id = 0; id < 10; ++id) {
if (r - 1 + i >= 0 && c - 1 + j >= 0 && r - 1 + i < 5 &&
c - 1 + j < 5) {
expected +=
input(id, r - 1 + i, c - 1 + j, b) * kernel(od, id, r, c);
}
}
}
}
EigenApprox(result(od, i, j, b), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, BatchedSpatialConvolutionRowMajor) {
Tensor<float, 4, RowMajor> input(13, 5, 5, 10);
Tensor<float, 4, RowMajor> kernel(3, 3, 10, 7);
Tensor<float, 4, RowMajor> result(13, 5, 5, 7);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
result = SpatialConvolution(input, kernel);
EXPECT_EQ(result.dimension(1), 5);
EXPECT_EQ(result.dimension(2), 5);
EXPECT_EQ(result.dimension(3), 7);
for (int b = 0; b < 13; ++b) {
for (int od = 0; od < 7; ++od) {
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 5; ++j) {
float expected = 0.0f;
for (int c = 0; c < 3; ++c) {
for (int r = 0; r < 3; ++r) {
for (int id = 0; id < 10; ++id) {
if (r - 1 + i >= 0 && c - 1 + j >= 0 && r - 1 + i < 5 &&
c - 1 + j < 5) {
expected +=
input(b, c - 1 + j, r - 1 + i, id) * kernel(c, r, id, od);
}
}
}
}
EigenApprox(result(b, j, i, od), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, ValidSpatialConvolution) {
const int input_depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 4;
const int patch_cols = 4;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
const int stride = 1;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), output_depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(id, r + i, c + j, b) * kernel(od, id, r, c);
}
}
}
if (result(od, i, j, b) != expected) {
std::cout << "at od=" << od << " b=" << b << " i=" << i
<< " j=" << j << " " << result(od, i, j, b) << " vs "
<< expected << std::endl;
}
EigenApprox(result(od, i, j, b), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, ValidSpatialConvolutionUnequalStrides) {
const int input_depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 4;
const int patch_cols = 4;
const int row_stride = 1;
const int col_stride = 2;
const int output_rows = 2;
const int output_cols = 1;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
result =
SpatialConvolution(input, kernel, row_stride, col_stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), output_depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
// The element-wise reference check below is skipped; only the output
// dimensions are verified for this unequal-stride case.
if (true) return;
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected +=
input(id, r + row_stride * i, c + col_stride * j, b) *
kernel(od, id, r, c);
}
}
}
if (result(od, i, j, b) != expected) {
std::cout << "at od=" << od << " b=" << b << " i=" << i
<< " j=" << j << " " << result(od, i, j, b) << " vs "
<< expected << std::endl;
}
EigenApprox(result(od, i, j, b), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, ValidSpatialConvolutionRowMajor) {
const int input_depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 4;
const int patch_cols = 4;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
output_depth);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
const int stride = 1;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), num_batches);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_depth);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_rows; ++c) {
for (int r = 0; r < patch_cols; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(b, c + j, r + i, id) * kernel(c, r, id, od);
}
}
}
if (result(b, j, i, od) != expected) {
std::cout << "at od=" << od << " b=" << b << " i=" << i
<< " j=" << j << " " << result(b, j, i, od) << " vs "
<< expected << std::endl;
}
EigenApprox(result(b, j, i, od), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, StridedSpatialConvolution) {
const int input_depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
int stride = 2;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), output_depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(id, r + stride * i, c + stride * j, b) *
kernel(od, id, r, c);
}
}
}
EigenApprox(result(od, i, j, b), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, KernelSmallerThanStride) {
const int input_depth = 2;
const int input_rows = 3;
const int input_cols = 3;
const int num_batches = 5;
const int output_depth = 6;
const int patch_rows = 1;
const int patch_cols = 1;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
int stride = 2;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), output_depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(id, r + stride * i, c + stride * j, b) *
kernel(od, id, r, c);
}
}
}
EigenApprox(result(od, i, j, b), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, StridedSpatialConvolutionRowMajor) {
const int input_depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
output_depth);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
int stride = 2;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), num_batches);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_depth);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(b, c + stride * j, r + stride * i, id) *
kernel(c, r, id, od);
}
}
}
EigenApprox(result(b, j, i, od), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, AtrousSpatial) {
const int input_depth = 10;
const int input_rows = 7;
const int input_cols = 7;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 3;
const int output_cols = 3;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
int stride = 1;
int in_stride = 2;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID,
in_stride, in_stride);
EXPECT_EQ(result.dimension(0), output_depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(id, in_stride * r + stride * i,
in_stride * c + stride * j, b) *
kernel(od, id, r, c);
}
}
}
EigenApprox(result(od, i, j, b), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, AtrousSpatialRowMajor) {
const int input_depth = 10;
const int input_rows = 7;
const int input_cols = 7;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 3;
const int output_cols = 3;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
output_depth);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
int stride = 1;
int in_stride = 2;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID,
in_stride, in_stride);
EXPECT_EQ(result.dimension(0), num_batches);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_depth);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(b, in_stride * c + stride * j,
in_stride * r + stride * i, id) *
kernel(c, r, id, od);
}
}
}
EigenApprox(result(b, j, i, od), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, AtrousSpatialRowMajorUnequalStrides) {
const int input_depth = 10;
const int input_rows = 7;
const int input_cols = 7;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 1;
const int output_cols = 3;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
output_depth);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
int row_stride = 1;
int col_stride = 2;
int row_in_stride = 3;
int col_in_stride = 1;
result = SpatialConvolution(input, kernel, row_stride, col_stride,
PADDING_VALID, row_in_stride, col_in_stride);
EXPECT_EQ(result.dimension(0), num_batches);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_depth);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(b, col_in_stride * c + col_stride * j,
row_in_stride * r + row_stride * i, id) *
kernel(c, r, id, od);
}
}
}
EigenApprox(result(b, j, i, od), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, SpatialConvContractionMapper) {
typedef Tensor<float, 1>::DimensionPair DimPair;
Tensor<float, 4> out(1, 1, 2, 1);
Tensor<float, 4> kern(1, 1, 2, 2);
for (int i = 0; i < kern.size(); ++i) {
kern.coeffRef(i) = static_cast<float>(i) + 1;
}
for (int i = 0; i < out.size(); ++i) {
out.coeffRef(i) = static_cast<float>(i) + 1;
}
DSizes<ptrdiff_t, 4> strides;
strides[0] = 1;
strides[1] = 2;
strides[2] = 2;
strides[3] = 1;
array<std::pair<ptrdiff_t, ptrdiff_t>, 4> paddings;
paddings[0] = std::make_pair(0, 0);
paddings[1] = std::make_pair(1, 2);
paddings[2] = std::make_pair(1, 1);
paddings[3] = std::make_pair(0, 0);
DSizes<ptrdiff_t, 3> out_dim;
out_dim[0] = 1;
out_dim[1] = 4;
out_dim[2] = 12;
array<bool, 4> kernel_reverse;
kernel_reverse[0] = false;
kernel_reverse[1] = false;
kernel_reverse[2] = true;
kernel_reverse[3] = true;
DSizes<ptrdiff_t, 3> k_dims;
k_dims[0] = 1;
k_dims[1] = 1;
k_dims[2] = 4;
array<DimPair, 2> contract_dims;
contract_dims[0] = DimPair(0, 0);
contract_dims[1] = DimPair(2, 1);
DSizes<ptrdiff_t, 4> in_dim;
in_dim[0] = 1;
in_dim[1] = 3;
in_dim[2] = 4;
in_dim[3] = 1;
DSizes<ptrdiff_t, 2> in_dbg_dim;
in_dbg_dim[0] = 3;
in_dbg_dim[1] = 4;
DSizes<ptrdiff_t, 2> out_dbg_dim;
out_dbg_dim[0] = 4;
out_dbg_dim[1] = 12;
Tensor<float, 4> direct =
kern.reverse(kernel_reverse)
.reshape(k_dims)
.contract(
out.extract_image_patches(2, 2, 1, 1, 1, 1, 2, 2, 1, 2, 1, 1, 0)
.reshape(out_dim),
contract_dims)
.reshape(in_dim);
Tensor<float, 4> indirect =
kern.reverse(kernel_reverse)
.reshape(k_dims)
.contract(
out.inflate(strides)
.pad(paddings)
.extract_image_patches(2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0)
.reshape(out_dim),
contract_dims)
.reshape(in_dim);
eigen_assert(dimensions_match(direct.dimensions(), indirect.dimensions()));
for (size_t i = 0; i < direct.dimensions().TotalSize(); ++i) {
EigenApprox(direct.data()[i], indirect.data()[i]);
}
EigenApprox(1.0f, direct(0, 0, 0, 0));
EigenApprox(3.0f, direct(0, 0, 1, 0));
EigenApprox(2.0f, direct(0, 0, 2, 0));
EigenApprox(6.0f, direct(0, 0, 3, 0));
EigenApprox(2.0f, direct(0, 1, 0, 0));
EigenApprox(4.0f, direct(0, 1, 1, 0));
EigenApprox(4.0f, direct(0, 1, 2, 0));
EigenApprox(8.0f, direct(0, 1, 3, 0));
}
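// Benchmark helper that packs right-hand-side blocks (extracted image
// patches) for the spatial convolution contraction. Every iteration picks a
// random input tensor plus a random (depth, column) offset and packs a block
// of at most block_rows x block_cols coefficients.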
template <typename T>
static void PackRhsHelper(::testing::benchmark::State& state,
int input_batches, int input_cols, int input_rows,
int input_depth,
int filter_count, int filter_cols, int filter_rows,
Eigen::PaddingType padding,
int col_strides, int row_strides,
int patch_col_inflate_stride,
int patch_row_inflate_stride,
Index block_rows, Index block_cols) {
srand(12345);
using Dimensions = Eigen::DSizes<Eigen::Index, 4>;
Dimensions input_dims(input_depth, input_rows, input_cols, input_batches);
static const int packet_size = Eigen::internal::packet_traits<T>::size;
using NewDimension = Eigen::DSizes<Index, 2>;
using nocontract_t = Eigen::array<Eigen::Index, 1>;
using contract_t = Eigen::array<Eigen::Index, 1>;
using ArgType = TensorMap<Tensor<T, 4>, Eigen::Aligned>;
using Evaluator = TensorEvaluator<
const TensorReshapingOp<
NewDimension, const TensorImagePatchOp<Dynamic, Dynamic, ArgType>>,
Eigen::DefaultDevice>;
using InputMapper = Eigen::internal::TensorContractionInputMapper<
T, Index, Eigen::internal::Rhs, Evaluator,
nocontract_t, contract_t,
packet_size,
true,
false,
0>;
using SubMapper = Eigen::internal::TensorContractionSubMapper<
T, Index, Eigen::internal::Rhs, Evaluator,
nocontract_t, contract_t,
packet_size,
true,
false,
0>;
#if defined(TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL)
using PackRhsImpl =
Eigen::internal::gemm_pack_colmajor_block<T, Eigen::Index, SubMapper,
ColMajor>;
#else
using Traits = typename Eigen::internal::gebp_traits<T, T>;
using PackRhsImpl =
Eigen::internal::gemm_pack_rhs<T, Eigen::Index, SubMapper,
Traits::nr,
ColMajor,
false,
false>;
#endif
Eigen::DefaultDevice device;
const Eigen::Index not_important = -1234;
nocontract_t nocontract_dim = {not_important};
contract_t contract_dim = {not_important};
Tensor<T, 4> packed(input_dims);
size_t input_bytes = input_dims.TotalSize() * sizeof(T);
size_t mem_size_bytes = 1024 * 1024 * 512;
size_t num_inputs =
std::max(static_cast<size_t>(1), mem_size_bytes / input_bytes);
std::vector<Tensor<T, 4>> inputs;
std::vector<Evaluator> evaluators;
std::vector<InputMapper> input_mappers;
inputs.reserve(num_inputs);
evaluators.reserve(num_inputs);
input_mappers.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs.emplace_back(input_dims);
inputs[i].setRandom();
ArgType tensor_map(inputs[i].data(), input_dims);
const auto image_patch_op = TensorImagePatchOp<Dynamic, Dynamic, ArgType>(
tensor_map,
filter_rows, filter_cols,
row_strides, col_strides,
1, 1,
patch_row_inflate_stride, patch_col_inflate_stride,
padding, 0.0);
Index input_rows_eff = (input_rows - 1) * patch_row_inflate_stride + 1;
Index input_cols_eff = (input_cols - 1) * patch_col_inflate_stride + 1;
Index output_rows = 0;
Index output_cols = 0;
if (padding == Eigen::PADDING_SAME) {
output_rows = input_rows_eff / row_strides;
output_cols = input_cols_eff / col_strides;
} else if (padding == Eigen::PADDING_VALID) {
output_rows =
numext::ceil((input_rows_eff - filter_rows + 1.f) / row_strides);
output_cols =
numext::ceil((input_cols_eff - filter_cols + 1.f) / col_strides);
} else {
eigen_assert(false && "not supported");
}
NewDimension reshape_dims;
reshape_dims[0] = input_depth * filter_rows * filter_cols;
reshape_dims[1] = output_rows * output_cols * input_batches;
const auto reshape_op =
TensorReshapingOp<NewDimension, decltype(image_patch_op)>(
image_patch_op, reshape_dims);
evaluators.emplace_back(reshape_op, device);
input_mappers.emplace_back(evaluators[i], nocontract_dim, nocontract_dim,
contract_dim, contract_dim);
}
const Index patch_depth = evaluators[0].impl().dimensions()[0];
const Index patch_rows = evaluators[0].impl().dimensions()[1];
const Index patch_cols = evaluators[0].impl().dimensions()[2];
const Index num_patches = evaluators[0].impl().dimensions()[3];
const Index patch_size = patch_depth * patch_rows * patch_cols;
PackRhsImpl pack_rhs;
const Index packed_total_size = input_dims.TotalSize();
const auto round_up = [](const Index idx) {
return (idx / packet_size) * packet_size;
};
for (auto s : state) {
int input_idx =
num_inputs == 1 ? 0 : internal::random<int>(0, num_inputs - 1);
Index depth_offset =
(patch_size > block_rows)
? round_up(internal::random<Index>(0, patch_size - 10))
: 0;
Index col_offset = internal::random<Index>(0, num_patches - 10);
Index depth = std::min(block_rows, patch_size - depth_offset);
Index cols = std::min(block_cols, num_patches - col_offset);
Index packed_size = depth * cols;
Index packed_offset =
internal::random<Index>(0, packed_total_size - packed_size - 1);
SubMapper sub_mapper =
input_mappers[input_idx].getSubMapper(depth_offset, col_offset);
pack_rhs(packed.data() + packed_offset, sub_mapper, depth, cols);
}
state.SetLabel(
absl::StrCat("patch: ", patch_rows, "x", patch_cols, " D", patch_depth,
"; num_patches=", num_patches, " patch_size=", patch_size,
" num_inputs=", num_inputs, " padding=", padding));
}
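// Benchmark helper that packs left-hand-side blocks (filter weights reshaped
// to a 2-D matrix of filter_count by rows * cols * depth) for the
// convolution contraction.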
template <typename T>
static void PackLhsHelper(::testing::benchmark::State& state,
int input_depth,
int filter_count, int filter_cols, int filter_rows,
Index block_rows, Index block_cols) {
srand(12345);
eigen_assert(block_rows <= filter_count);
eigen_assert(block_cols <= input_depth * filter_rows * filter_cols);
using Dimensions = Eigen::DSizes<Eigen::Index, 4>;
Dimensions filter_dims(filter_count, filter_rows, filter_cols, input_depth);
static const int packet_size = Eigen::internal::packet_traits<T>::size;
using NewDimension = Eigen::DSizes<Index, 2>;
using nocontract_t = Eigen::array<Eigen::Index, 1>;
using contract_t = Eigen::array<Eigen::Index, 1>;
using ArgType = TensorMap<Tensor<T, 4>, Eigen::Aligned>;
using Evaluator =
TensorEvaluator<const TensorReshapingOp<NewDimension, ArgType>,
Eigen::DefaultDevice>;
using InputMapper = Eigen::internal::TensorContractionInputMapper<
T, Index, Eigen::internal::Lhs, Evaluator,
nocontract_t, contract_t,
packet_size,
true,
false,
0>;
using SubMapper = Eigen::internal::TensorContractionSubMapper<
T, Index, Eigen::internal::Lhs, Evaluator,
nocontract_t, contract_t,
packet_size,
true,
false,
0>;
#if defined(TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL)
using PackLhsImpl =
Eigen::internal::gemm_pack_colmajor_block<T, Eigen::Index, SubMapper,
ColMajor>;
#else
using Traits = typename Eigen::internal::gebp_traits<T, T>;
using PackLhsImpl =
Eigen::internal::gemm_pack_lhs<T, Eigen::Index, SubMapper,
Traits::mr,
Traits::LhsProgress,
typename Traits::LhsPacket4Packing,
ColMajor>;
#endif
Eigen::DefaultDevice device;
NewDimension reshape_dims;
reshape_dims[0] = filter_count;
reshape_dims[1] = input_depth * filter_rows * filter_cols;
nocontract_t nocontract_strides = {1};
contract_t contract_strides = {filter_count};
nocontract_t i_strides = {1};
contract_t k_strides = {1};
Tensor<T, 4> packed(filter_dims);
size_t input_bytes = filter_dims.TotalSize() * sizeof(T);
size_t mem_size_bytes = 1024 * 1024 * 512;
size_t num_filters =
std::max(static_cast<size_t>(1), mem_size_bytes / input_bytes);
std::vector<Tensor<T, 4>> filters;
std::vector<Evaluator> evaluators;
std::vector<InputMapper> input_mappers;
filters.reserve(num_filters);
evaluators.reserve(num_filters);
input_mappers.reserve(num_filters);
for (int i = 0; i < num_filters; ++i) {
filters.emplace_back(filter_dims);
filters[i].setRandom();
ArgType tensor_map(filters[i].data(), filter_dims);
const auto reshape_op =
TensorReshapingOp<NewDimension, ArgType>(tensor_map, reshape_dims);
evaluators.emplace_back(reshape_op, device);
input_mappers.emplace_back(evaluators[i], nocontract_strides, i_strides,
contract_strides, k_strides);
}
PackLhsImpl pack_lhs;
const Index packed_total_size = filter_dims.TotalSize();
const auto round_up = [](const Index idx) {
return (idx / packet_size) * packet_size;
};
const Index max_row = filter_count;
const Index max_col = filter_rows * filter_cols * input_depth;
for (auto s : state) {
int filter_idx =
num_filters == 1 ? 0 : internal::random<int>(0, num_filters - 1);
Index row_offset = round_up(internal::random<Index>(0, max_row - 10));
Index col_offset = round_up(internal::random<Index>(0, max_col - 10));
Index rows = std::min(block_rows, max_row - row_offset);
Index cols = std::min(block_cols, max_col - col_offset);
Index packed_offset = round_up(
internal::random<Index>(0, packed_total_size - rows * cols - 1));
SubMapper sub_mapper =
input_mappers[filter_idx].getSubMapper(row_offset, col_offset);
#if defined(TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL)
pack_lhs(packed.data() + packed_offset, sub_mapper, rows, cols);
#else
pack_lhs(packed.data() + packed_offset, sub_mapper, cols, rows);
#endif
}
state.SetLabel(absl::StrCat(
"filter: count=", filter_count, " dims=", filter_rows, "x", filter_cols,
"; input: depth=", input_depth, "; num_filers=", num_filters));
}
#define BM_CONCAT(a, b) a##b
#define BM_RHS_NAME(prefix, T, N, H, W, C, FC, FH, FW, PAD, SH, SW, ISH, ISW, \
BR, BC) \
BM_CONCAT( \
BM_##prefix##_##T##_##N##_##H##x##W##_IC##C##_FC##FC##_##FH##x##FW, \
_##PAD##_s##SH##x##SW##_is##ISH##x##ISW##_B##BR##x##BC)
#define BM_PackRhs(T, N, H, W, C, FC, FH, FW, PAD, SH, SW, ISH, ISW, BR, BC) \
static void BM_RHS_NAME(PackRhs, T, N, H, W, C, FC, FH, FW, PAD, SH, SW, \
ISH, ISW, BR, \
BC)(::testing::benchmark::State & state) { \
PackRhsHelper<T>(state, N, H, W, C, FC, FH, FW, PADDING_##PAD, SH, SW, \
ISH, ISW, BR, BC); \
} \
BENCHMARK(BM_RHS_NAME(PackRhs, T, N, H, W, C, FC, FH, FW, PAD, SH, SW, ISH, \
ISW, BR, BC)) \
->UseRealTime()
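// Each BM_PackRhs instantiation below lists, in order: element type, batch
// count, the two spatial input sizes, input depth, filter count, the two
// filter sizes, padding mode, the two convolution strides, the two inflate
// strides, and finally the packed block shape (rows, cols).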
BM_PackRhs( float,
32,
64, 64,
32,
64,
5, 5,
VALID,
1, 1,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
32,
64,
5, 5,
SAME,
1, 1,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
32,
64,
5, 5,
VALID,
2, 2,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
32,
64,
5, 5,
SAME,
2, 2,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
30,
64,
5, 5,
SAME,
1, 1,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
30,
64,
5, 5,
VALID,
1, 1,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
30,
64,
5, 5,
SAME,
2, 2,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
30,
64,
5, 5,
VALID,
2, 2,
1, 1,
256, 56);
BM_PackRhs( float,
32,
256, 256,
4,
16,
8, 8,
SAME,
1, 1,
1, 1,
256, 56);
BM_PackRhs( float,
32,
256, 256,
4,
16,
8, 8,
VALID,
1, 1,
1, 1,
256, 56);
BM_PackRhs( float,
32,
256, 256,
4,
16,
8, 8,
SAME,
2, 4,
1, 1,
256, 56);
BM_PackRhs( float,
32,
256, 256,
4,
16,
8, 8,
VALID,
2, 4,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
4,
16,
3, 3,
SAME,
1, 1,
1, 1,
36, 432);
BM_PackRhs( float,
32,
64, 64,
4,
16,
3, 3,
VALID,
1, 1,
1, 1,
36, 432);
BM_PackRhs( float,
32,
64, 64,
4,
16,
3, 3,
SAME,
2, 2,
1, 1,
36, 432);
BM_PackRhs( float,
32,
64, 64,
4,
16,
3, 3,
VALID,
2, 2,
1, 1,
36, 432);
BM_PackRhs( float,
32,
32, 32,
96,
96,
5, 5,
SAME,
1, 1,
2, 2,
272, 240);
BM_PackRhs( float,
32,
32, 32,
96,
96,
5, 5,
VALID,
1, 1,
2, 2,
272, 240);
#if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL)
using qint8 = Eigen::QInt8;
BM_PackRhs( qint8,
32,
64, 64,
32,
64,
5, 5,
SAME,
1, 1,
1, 1,
256, 56);
#endif
#define BM_LHS_NAME(prefix, T, C, FC, FH, FW, BR, BC) \
BM_CONCAT(BM_##prefix##_##T##_##C##_FC##FC##_##FH##x##FW, _B##BR##x##BC)
#define BM_PackLhs(T, C, FC, FH, FW, BR, BC) \
static void BM_LHS_NAME(PackLhs, T, C, FC, FH, FW, BR, \
BC)(::testing::benchmark::State & state) { \
PackLhsHelper<T>(state, C, FC, FH, FW, BR, BC); \
} \
BENCHMARK(BM_LHS_NAME(PackLhs, T, C, FC, FH, FW, BR, BC))->UseRealTime()
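// Each BM_PackLhs instantiation below lists, in order: element type, input
// depth, filter count, the two filter sizes, and the packed block shape.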
BM_PackLhs( float,
128,
1024,
3, 3,
256, 56);
BM_PackLhs( float,
128,
1024,
3, 3,
56, 256);
BM_PackLhs( float,
30,
64,
3, 3,
256, 56);
BM_PackLhs( float,
50,
64,
3, 3,
56, 256);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/convolution/eigen_spatial_convolutions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8961def6-4231-4c6e-8688-9ca6ff3df268 | cpp | tensorflow/tensorflow | concurrent_vector | third_party/xla/xla/tsl/concurrency/concurrent_vector.h | third_party/xla/xla/tsl/concurrency/concurrent_vector_test.cc | #ifndef XLA_TSL_CONCURRENCY_CONCURRENT_VECTOR_H_
#define XLA_TSL_CONCURRENCY_CONCURRENT_VECTOR_H_
#include <algorithm>
#include <array>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "tsl/platform/logging.h"
namespace tsl {
namespace internal {
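// Append-only vector supporting lock-free reads that are concurrent with
// mutex-serialized writes. Growth never reallocates in place: emplace_back
// copies all elements into a vector with twice the capacity and publishes it
// by atomically swapping a packed {storage index, size} state word, so
// readers never observe a partially grown buffer. Indices returned by
// emplace_back remain valid for the lifetime of the container.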
template <typename T>
class ConcurrentVector {
public:
explicit ConcurrentVector(size_t initial_capacity) : state_(0ull) {
auto& v = all_allocated_elements_[0];
v.reserve(std::max(static_cast<size_t>(1), initial_capacity));
}
const T& operator[](size_t index) const {
auto state = State::Decode(state_.load(std::memory_order_acquire));
DCHECK_LT(index, state.size);
return all_allocated_elements_.data()[state.last_allocated].data()[index];
}
absl::Span<const T> ToConstSpan() const {
auto state = State::Decode(state_.load(std::memory_order_acquire));
auto& storage = all_allocated_elements_[state.last_allocated];
return absl::MakeConstSpan(storage.data(), state.size);
}
size_t size() const {
return State::Decode(state_.load(std::memory_order_relaxed)).size;
}
template <typename... Args>
size_t emplace_back(Args&&... args) {
absl::MutexLock lock(&mutex_);
auto state = State::Decode(state_.load(std::memory_order_relaxed));
auto& last = all_allocated_elements_[state.last_allocated];
if (last.size() < last.capacity()) {
last.emplace_back(std::forward<Args>(args)...);
state.size += 1;
state_.store(state.Encode(), std::memory_order_release);
return state.size - 1;
}
auto& new_last = all_allocated_elements_[state.last_allocated + 1];
new_last.reserve(last.capacity() * 2);
DCHECK_EQ(last.size(), last.capacity());
new_last.insert(new_last.begin(), last.begin(), last.end());
new_last.emplace_back(std::forward<Args>(args)...);
state.last_allocated += 1;
state.size += 1;
state_.store(state.Encode(), std::memory_order_release);
return state.size - 1;
}
private:
static constexpr uint64_t kLastAllocatedMask = (1ull << 32) - 1;
static constexpr uint64_t kSizeMask = ((1ull << 32) - 1) << 32;
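// Vector state is packed into a single 64-bit word: the low 32 bits hold the
// index of the currently published storage vector, the high 32 bits hold the
// number of visible elements.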
struct State {
uint64_t last_allocated;
uint64_t size;
static State Decode(uint64_t state) {
uint64_t last_allocated = (state & kLastAllocatedMask);
uint64_t size = (state & kSizeMask) >> 32;
return {last_allocated, size};
}
uint64_t Encode() const { return (size << 32) | last_allocated; }
};
std::atomic<uint64_t> state_;
absl::Mutex mutex_;
std::array<std::vector<T>, 64> all_allocated_elements_;
};
}
}
#endif | #include "xla/tsl/concurrency/concurrent_vector.h"
#include <algorithm>
#include <vector>
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
using ::tsl::internal::ConcurrentVector;
TEST(ConcurrentVectorTest, SingleThreaded) {
ConcurrentVector<int> vec(1);
constexpr int kCount = 1000;
for (int i = 0; i < kCount; ++i) {
ASSERT_EQ(i, vec.emplace_back(i));
}
for (int i = 0; i < kCount; ++i) {
EXPECT_EQ(i, vec[i]);
}
}
TEST(ConcurrentVectorTest, OneWriterOneReader) {
ConcurrentVector<int> vec(1);
thread::ThreadPool pool(Env::Default(), "concurrent-vector", 4);
constexpr int kCount = 1000;
pool.Schedule([&] {
for (int i = 0; i < kCount; ++i) {
ASSERT_EQ(i, vec.emplace_back(i));
}
});
pool.Schedule([&] {
for (int i = 0; i < kCount; ++i) {
while (i >= vec.size()) {
}
EXPECT_EQ(i, vec[i]);
}
});
}
TEST(ConcurrentVectorTest, TwoWritersTwoReaders) {
ConcurrentVector<int> vec(1);
thread::ThreadPool pool(Env::Default(), "concurrent-vector", 4);
constexpr int kCount = 1000;
auto writer = [&] {
for (int i = 0; i < kCount / 2; ++i) {
vec.emplace_back(i);
}
};
pool.Schedule(writer);
pool.Schedule(writer);
auto reader = [&] {
std::vector<int> stored;
for (int i = 0; i < kCount; ++i) {
while (i >= vec.size()) {
}
stored.emplace_back(vec[i]);
}
std::sort(stored.begin(), stored.end());
for (int i = 0; i < kCount / 2; ++i) {
ASSERT_EQ(stored[2 * i], i);
ASSERT_EQ(stored[2 * i + 1], i);
}
};
pool.Schedule(reader);
pool.Schedule(reader);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/concurrency/concurrent_vector.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/concurrency/concurrent_vector_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aae77314-eb0b-4792-8519-6a254ba3f7fb | cpp | tensorflow/tensorflow | bits | tensorflow/lite/experimental/microfrontend/lib/bits.h | third_party/xla/xla/tsl/lib/core/bits_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
#ifdef __cplusplus
#include <cstdint>
extern "C" {
#endif
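// Portable fallback: shifts narrow the value down to a nibble, then a
// 16-entry lookup table encoded in the string literal finishes the count.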
static inline int CountLeadingZeros32Slow(uint64_t n) {
int zeroes = 28;
if (n >> 16) zeroes -= 16, n >>= 16;
if (n >> 8) zeroes -= 8, n >>= 8;
if (n >> 4) zeroes -= 4, n >>= 4;
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}
static inline int CountLeadingZeros32(uint32_t n) {
#if !defined(__clang__) && defined(_MSC_VER)
unsigned long result = 0;
if (_BitScanReverse(&result, n)) {
return 31 - result;
}
return 32;
#elif defined(__clang__) && defined(__GNUC__)
if (n == 0) {
return 32;
}
return __builtin_clz(n);
#else
return CountLeadingZeros32Slow(n);
#endif
}
static inline int MostSignificantBit32(uint32_t n) {
return 32 - CountLeadingZeros32(n);
}
static inline int CountLeadingZeros64Slow(uint64_t n) {
int zeroes = 60;
if (n >> 32) zeroes -= 32, n >>= 32;
if (n >> 16) zeroes -= 16, n >>= 16;
if (n >> 8) zeroes -= 8, n >>= 8;
if (n >> 4) zeroes -= 4, n >>= 4;
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}
static inline int CountLeadingZeros64(uint64_t n) {
#if !defined(__clang__) && defined(_MSC_VER) && defined(_M_X64)
unsigned long result = 0;
if (_BitScanReverse64(&result, n)) {
return 63 - result;
}
return 64;
#elif !defined(__clang__) && defined(_MSC_VER)
unsigned long result = 0;
if ((n >> 32) && _BitScanReverse(&result, n >> 32)) {
return 31 - result;
}
if (_BitScanReverse(&result, n)) {
return 63 - result;
}
return 64;
#elif defined(__clang__) || defined(__GNUC__)
if (n == 0) {
return 64;
}
return __builtin_clzll(n);
#else
return CountLeadingZeros64Slow(n);
#endif
}
static inline int MostSignificantBit64(uint64_t n) {
return 64 - CountLeadingZeros64(n);
}
#ifdef __cplusplus
}
#endif
#endif | #include "xla/tsl/lib/core/bits.h"
#include <cstdint>
#include "tsl/platform/test.h"
namespace tsl {
namespace {
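// kMaxRepresentablePowerOfTwo is 2^62, the largest power of two that fits in
// a signed 64-bit integer.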
TEST(BitsTest, NextPowerOfTwoS64) {
constexpr int64_t kMaxRepresentablePowerOfTwo =
static_cast<int64_t>(uint64_t{1} << 62);
EXPECT_EQ(NextPowerOfTwoS64(0), 1);
EXPECT_EQ(NextPowerOfTwoS64(1), 1);
EXPECT_EQ(NextPowerOfTwoS64(2), 2);
EXPECT_EQ(NextPowerOfTwoS64(3), 4);
EXPECT_EQ(NextPowerOfTwoS64(kMaxRepresentablePowerOfTwo - 1),
kMaxRepresentablePowerOfTwo);
EXPECT_EQ(NextPowerOfTwoS64(kMaxRepresentablePowerOfTwo),
kMaxRepresentablePowerOfTwo);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/microfrontend/lib/bits.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/core/bits_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
708fd6f0-6f63-4b65-9f78-85af47149678 | cpp | tensorflow/tensorflow | buffered_file | third_party/xla/xla/tsl/lib/io/buffered_file.h | third_party/xla/xla/tsl/lib/io/buffered_file_test.cc | #ifndef XLA_TSL_LIB_IO_BUFFERED_FILE_H_
#define XLA_TSL_LIB_IO_BUFFERED_FILE_H_
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include "xla/tsl/lib/hash/crc32c.h"
#include "tsl/platform/cord.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/status.h"
namespace tsl {
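// WritableFile wrapper that buffers Append() calls and forwards them to the
// underlying file in chunks of up to buffer_size bytes, while keeping a
// running CRC32C of everything appended (see crc32() and reset_crc32()).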
class BufferedWritableFile : public WritableFile {
public:
explicit BufferedWritableFile(std::unique_ptr<WritableFile> file,
int64_t buffer_size = kDefaultBufferSize)
: file_(std::move(file)) {
buffer_.resize(buffer_size);
}
~BufferedWritableFile() override { Close().IgnoreError(); }
absl::Status Append(absl::string_view str_data) override {
int64_t bytes_left = str_data.size();
const char* data = str_data.data();
while (bytes_left > 0) {
int64_t append_bytes = std::min(
static_cast<int64_t>(buffer_.size() - buffer_pos_), bytes_left);
std::copy_n(data, append_bytes, buffer_.begin() + buffer_pos_);
crc32_ = crc32c::Extend(crc32_, &buffer_[buffer_pos_], append_bytes);
buffer_pos_ += append_bytes;
if (buffer_pos_ == buffer_.size()) {
TF_RETURN_IF_ERROR(file_->Append(buffer_));
buffer_pos_ = 0;
}
data = data + append_bytes;
bytes_left -= append_bytes;
}
return absl::OkStatus();
}
absl::Status Append(const absl::Cord& data) override {
for (absl::string_view fragment : data.Chunks()) {
TF_RETURN_IF_ERROR(Append(fragment));
}
return absl::OkStatus();
}
absl::Status Close() override {
TF_RETURN_IF_ERROR(Flush());
return file_->Close();
}
absl::Status Flush() override {
if (buffer_pos_ > 0) {
TF_RETURN_IF_ERROR(
file_->Append(absl::string_view(&buffer_[0], buffer_pos_)));
buffer_pos_ = 0;
}
return file_->Flush();
}
absl::Status Tell(int64_t* position) override {
int64_t bytes_written;
absl::Status status = file_->Tell(&bytes_written);
if (status.ok()) {
*position = bytes_written + buffer_pos_;
return absl::OkStatus();
} else {
return status;
}
}
absl::Status Sync() override { return file_->Sync(); }
uint32_t crc32() const { return crc32_; }
void reset_crc32() { crc32_ = 0; }
private:
static constexpr int64_t kDefaultBufferSize = 1048576;
std::string buffer_;
int64_t buffer_pos_ = 0;
std::unique_ptr<WritableFile> file_;
uint32_t crc32_ = 0;
BufferedWritableFile(const BufferedWritableFile&) = delete;
void operator=(const BufferedWritableFile&) = delete;
};
}
#endif | #include "xla/tsl/lib/io/buffered_file.h"
#include <memory>
#include <utility>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace io {
namespace {
TEST(BufferedInputStream, Tell) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
std::unique_ptr<WritableFile> write_file;
TF_ASSERT_OK(env->NewWritableFile(fname, &write_file));
BufferedWritableFile file(std::move(write_file), 8);
int64_t position;
TF_ASSERT_OK(file.Append("foo"));
TF_ASSERT_OK(file.Tell(&position));
EXPECT_EQ(position, 3);
TF_ASSERT_OK(file.Append("bar"));
TF_ASSERT_OK(file.Tell(&position));
EXPECT_EQ(position, 6);
TF_ASSERT_OK(file.Append("baz"));
TF_ASSERT_OK(file.Tell(&position));
EXPECT_EQ(position, 9);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/buffered_file.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/buffered_file_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ed071fc0-b1a7-41b0-b171-402f275ef9fc | cpp | tensorflow/tensorflow | math_util | tensorflow/core/lib/math/math_util.h | third_party/xla/xla/tsl/lib/math/math_util_test.cc | #ifndef TENSORFLOW_CORE_LIB_MATH_MATH_UTIL_H_
#define TENSORFLOW_CORE_LIB_MATH_MATH_UTIL_H_
#include "xla/tsl/lib/math/math_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
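// Compatibility header: re-exports tsl::MathUtil into the tensorflow
// namespace.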
using tsl::MathUtil;
}
#endif | #include "xla/tsl/lib/math/math_util.h"
#include <cmath>
#include <limits>
#include <type_traits>
#include <vector>
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
const int kNumTestArguments = 4;
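// Each row of the test data below is {numerator, denominator, expected
// floor, expected ceiling}; both CeilOfRatio/FloorOfRatio and the
// CeilOrFloorOfRatio entry points are checked against it.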
template <typename IntegralType, typename TestDataType>
void TestCeilOfRatio(const TestDataType test_data[][kNumTestArguments],
int num_tests) {
for (int i = 0; i < num_tests; ++i) {
const IntegralType numerator = test_data[i][0];
const IntegralType denominator = test_data[i][1];
const IntegralType expected_floor = test_data[i][2];
const IntegralType expected_ceil = test_data[i][3];
IntegralType floor_1 = tsl::MathUtil::FloorOfRatio(numerator, denominator);
IntegralType floor_2 =
tsl::MathUtil::CeilOrFloorOfRatio<IntegralType, false>(numerator,
denominator);
EXPECT_EQ(floor_1, floor_2);
EXPECT_EQ(expected_floor, floor_1)
<< "FloorOfRatio fails with numerator = " << numerator
<< ", denominator = " << denominator << " "
<< (8 * sizeof(IntegralType)) << " bits";
IntegralType ceil_1 = tsl::MathUtil::CeilOfRatio(numerator, denominator);
IntegralType ceil_2 = tsl::MathUtil::CeilOrFloorOfRatio<IntegralType, true>(
numerator, denominator);
EXPECT_EQ(ceil_1, ceil_2);
EXPECT_EQ(expected_ceil, ceil_1)
<< "CeilOfRatio fails with numerator = " << numerator
<< ", denominator = " << denominator << " "
<< (8 * sizeof(IntegralType)) << " bits";
}
}
template <typename UnsignedIntegralType>
void TestCeilOfRatioUnsigned(uint64 kMax) {
const int kNumTests = 12;
const uint64 kTestData[kNumTests][kNumTestArguments] = {
{0, 1, 0, 0},
{0, 2, 0, 0},
{0, kMax, 0, 0},
{1, 1, 1, 1},
{5, 2, 2, 3},
{kMax, 1, kMax, kMax},
{kMax, 2, kMax / 2, kMax / 2 + ((kMax % 2 != 0) ? 1 : 0)},
{kMax, 3, kMax / 3, kMax / 3 + ((kMax % 3 != 0) ? 1 : 0)},
{1, kMax, 0, 1},
{2, kMax, 0, 1},
{3, kMax, 0, 1},
{kMax, kMax, 1, 1},
};
TestCeilOfRatio<UnsignedIntegralType, uint64>(kTestData, kNumTests);
}
template <typename SignedInteger>
void TestCeilOfRatioSigned(int64_t kMin, int64_t kMax) {
const int kNumTests = 30;
const int64_t kTestData[kNumTests][kNumTestArguments] = {
{0, 1, 0, 0},
{0, -1, 0, 0},
{0, 2, 0, 0},
{0, kMin, 0, 0},
{0, kMax, 0, 0},
{1, 1, 1, 1},
{-1, 1, -1, -1},
{1, -1, -1, -1},
{-1, -1, 1, 1},
{5, 2, 2, 3},
{-5, 2, -3, -2},
{5, -2, -3, -2},
{-5, -2, 2, 3},
{kMax, 1, kMax, kMax},
{kMax, -1, -kMax, -kMax},
{kMax, 2, kMax / 2, kMax / 2 + ((kMax % 2 != 0) ? 1 : 0)},
{kMax, 3, kMax / 3, kMax / 3 + ((kMax % 3 != 0) ? 1 : 0)},
{kMin, 1, kMin, kMin},
{kMin, 2, kMin / 2 - ((kMin % 2 != 0) ? 1 : 0), kMin / 2},
{kMin, 3, kMin / 3 - ((kMin % 3 != 0) ? 1 : 0), kMin / 3},
{1, kMax, 0, 1},
{2, kMax, 0, 1},
{3, kMax, 0, 1},
{1, kMin, -1, 0},
{2, kMin, -1, 0},
{3, kMin, -1, 0},
{kMin, kMin, 1, 1},
{kMin, kMax, -2, -1},
{kMax, kMin, -1, 0},
{kMax, kMax, 1, 1},
};
TestCeilOfRatio<SignedInteger, int64_t>(kTestData, kNumTests);
}
template <typename IntegralType>
static IntegralType CeilOfRatioDenomMinusOne(IntegralType numerator,
IntegralType denominator) {
const IntegralType kOne(1);
return (numerator + denominator - kOne) / denominator;
}
template <typename IntegralType>
static IntegralType FloorOfRatioByDivision(IntegralType numerator,
IntegralType denominator) {
return numerator / denominator;
}
template <typename Integer, bool ComputeCeil>
static Integer CeilOrFloorOfRatioArithmetic(Integer numerator,
Integer denominator) {
if (ComputeCeil) {
return CeilOfRatioDenomMinusOne(numerator, denominator);
} else {
return FloorOfRatioByDivision(numerator, denominator);
}
}
void TestThatCeilOfRatioDenomMinusOneIsIncorrect(int64_t numerator,
int64_t denominator,
int64_t expected_error) {
const int64_t correct_result =
tsl::MathUtil::CeilOfRatio(numerator, denominator);
const int64_t result_by_denom_minus_one =
CeilOfRatioDenomMinusOne(numerator, denominator);
EXPECT_EQ(result_by_denom_minus_one + expected_error, correct_result)
<< "numerator = " << numerator << " denominator = " << denominator
<< " expected error = " << expected_error
<< " Actual difference: " << (correct_result - result_by_denom_minus_one);
}
void TestThatCeilOfRatioDenomMinusOneIsIncorrect() {
TestThatCeilOfRatioDenomMinusOneIsIncorrect(-1LL, -2LL, -1LL);
}
TEST(MathUtil, CeilOfRatio) {
TestCeilOfRatioUnsigned<uint8>(kuint8max);
TestCeilOfRatioUnsigned<uint16>(kuint16max);
TestCeilOfRatioUnsigned<uint32>(kuint32max);
TestCeilOfRatioUnsigned<uint64>(kuint64max);
TestCeilOfRatioSigned<int8>(kint8min, kint8max);
TestCeilOfRatioSigned<int16>(kint16min, kint16max);
TestCeilOfRatioSigned<int32>(kint32min, kint32max);
TestCeilOfRatioSigned<int64_t>(kint64min, kint64max);
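// Kept disabled: TestThatCeilOfRatioDenomMinusOneIsIncorrect only documents how the
// naive (numerator + denominator - 1) / denominator formula diverges from
// CeilOfRatio when the operands are negative.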
#if 0
TestThatCeilOfRatioDenomMinusOneIsIncorrect();
#endif
}
struct GCDTestCase {
unsigned int x;
unsigned int y;
unsigned int gcd;
};
TEST(MathUtil, GCD) {
std::vector<GCDTestCase> testcases({
{10, 20, 10},
{27, 8, 1},
{4, 3, 1},
{6, 8, 2},
{5, 0, 5},
{5, 5, 5},
{0, 0, 0}
});
for (const auto& tc : testcases) {
EXPECT_EQ(tc.gcd, tsl::MathUtil::GCD<uint32>(tc.x, tc.y));
EXPECT_EQ(tc.gcd, tsl::MathUtil::GCD<uint32>(tc.y, tc.x));
EXPECT_EQ(tc.gcd, tsl::MathUtil::GCD<uint64>(tc.x, tc.y));
EXPECT_EQ(tc.gcd, tsl::MathUtil::GCD<uint64>(tc.y, tc.x));
}
const uint64 biggish_prime = 1666666667;
EXPECT_EQ(biggish_prime,
tsl::MathUtil::GCD<uint64>(biggish_prime * 3, biggish_prime * 4));
}
template <typename T>
void TestOneIPowN() {
const T one{1};
for (int i = 0; i < 1024; ++i) {
EXPECT_EQ(tsl::MathUtil::IPow(one, i), one);
}
}
template <typename T>
void TestTwoIPowN() {
int limit = std::is_integral<T>::value ? std::numeric_limits<T>::digits : 63;
for (int i = 0; i < limit; ++i) {
EXPECT_EQ(tsl::MathUtil::IPow(T{2}, i), static_cast<T>(1ull << i));
}
}
template <typename T>
void TestFloatIPow(const int max_exponent, const T start, const T end,
const T step) {
for (T f = start; f < end; f += step) {
for (int i = 0; i < max_exponent; ++i) {
EXPECT_FLOAT_EQ(tsl::MathUtil::IPow(f, i), std::pow(f, i));
}
}
}
TEST(MathUtil, IPow) {
TestOneIPowN<double>();
TestOneIPowN<float>();
TestOneIPowN<int>();
TestOneIPowN<int64_t>();
TestTwoIPowN<double>();
TestTwoIPowN<float>();
TestTwoIPowN<int>();
TestTwoIPowN<int64_t>();
EXPECT_EQ(tsl::MathUtil::IPow(3, 0), 1);
EXPECT_EQ(tsl::MathUtil::IPow(3, 1), 3);
EXPECT_EQ(tsl::MathUtil::IPow(3, 2), 9);
EXPECT_EQ(tsl::MathUtil::IPow(3, 3), 27);
EXPECT_EQ(tsl::MathUtil::IPow(3, 4), 81);
EXPECT_EQ(tsl::MathUtil::IPow(3, 5), 243);
TestFloatIPow<float>(13, -16.0f, 16.0f, 1.0f / 8);
TestFloatIPow<double>(13, -16.0, 16.0, 1.0 / 8);
TestFloatIPow<float>(13, -1.0f / (1 << 12), -1.0f / (1 << 12),
1.0f / (1 << 16));
TestFloatIPow<double>(13, -1.0 / (1 << 12), -1.0 / (1 << 12),
1.0 / (1 << 16));
}
TEST(MathUtil, IPowEdgeCases) {
constexpr const double kInf = std::numeric_limits<double>::infinity();
EXPECT_EQ(tsl::MathUtil::IPow(-12345.0, 79), -kInf);
EXPECT_EQ(tsl::MathUtil::IPow(-12345.0, 80), +kInf);
EXPECT_EQ(tsl::MathUtil::IPow(+0.0, 3), +0.0);
EXPECT_EQ(tsl::MathUtil::IPow(-0.0, 3), -0.0);
EXPECT_EQ(tsl::MathUtil::IPow(+0.0, 42), +0.0);
EXPECT_EQ(tsl::MathUtil::IPow(-0.0, 42), +0.0);
EXPECT_EQ(tsl::MathUtil::IPow(-kInf, 0.0), 1.0);
EXPECT_EQ(tsl::MathUtil::IPow(-2.0, 0.0), 1.0);
EXPECT_EQ(tsl::MathUtil::IPow(-1.0, 0.0), 1.0);
EXPECT_EQ(tsl::MathUtil::IPow(-0.0, 0.0), 1.0);
EXPECT_EQ(tsl::MathUtil::IPow(+0.0, 0.0), 1.0);
EXPECT_EQ(tsl::MathUtil::IPow(+1.0, 0.0), 1.0);
EXPECT_EQ(tsl::MathUtil::IPow(+2.0, 0.0), 1.0);
EXPECT_EQ(tsl::MathUtil::IPow(+kInf, 0.0), 1.0);
EXPECT_EQ(tsl::MathUtil::IPow(std::numeric_limits<double>::quiet_NaN(), 0.0),
1.0);
EXPECT_EQ(tsl::MathUtil::IPow(-kInf, 43), -kInf);
EXPECT_EQ(tsl::MathUtil::IPow(-kInf, 42), +kInf);
EXPECT_EQ(tsl::MathUtil::IPow(+kInf, 42), +kInf);
EXPECT_EQ(tsl::MathUtil::IPow(+kInf, 43), +kInf);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/math/math_util.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/math/math_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
38da609b-05a2-4452-af29-1e55ce366fdc | cpp | tensorflow/tensorflow | iterator_range | tensorflow/core/lib/gtl/iterator_range.h | third_party/xla/xla/tsl/lib/gtl/iterator_range_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_ITERATOR_RANGE_H_
#define TENSORFLOW_CORE_LIB_GTL_ITERATOR_RANGE_H_
#include "xla/tsl/lib/gtl/iterator_range.h"
namespace tensorflow {
namespace gtl {
using ::tsl::gtl::iterator_range;
using ::tsl::gtl::make_range;
}
}
#endif | #include "xla/tsl/lib/gtl/iterator_range.h"
#include <vector>
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace gtl {
namespace {
TEST(IteratorRange, WholeVector) {
std::vector<int> v = {2, 3, 5, 7, 11, 13};
iterator_range<std::vector<int>::iterator> range(v.begin(), v.end());
int index = 0;
for (int prime : range) {
ASSERT_LT(index, v.size());
EXPECT_EQ(v[index], prime);
++index;
}
EXPECT_EQ(v.size(), index);
}
TEST(IteratorRange, VectorMakeRange) {
std::vector<int> v = {2, 3, 5, 7, 11, 13};
auto range = make_range(v.begin(), v.end());
int index = 0;
for (int prime : range) {
ASSERT_LT(index, v.size());
EXPECT_EQ(v[index], prime);
++index;
}
EXPECT_EQ(v.size(), index);
}
TEST(IteratorRange, PartArray) {
int v[] = {2, 3, 5, 7, 11, 13};
iterator_range<int*> range(&v[1], &v[4]);
int index = 1;
for (int prime : range) {
ASSERT_LT(index, TF_ARRAYSIZE(v));
EXPECT_EQ(v[index], prime);
++index;
}
EXPECT_EQ(4, index);
}
TEST(IteratorRange, ArrayMakeRange) {
int v[] = {2, 3, 5, 7, 11, 13};
auto range = make_range(&v[1], &v[4]);
int index = 1;
for (int prime : range) {
ASSERT_LT(index, TF_ARRAYSIZE(v));
EXPECT_EQ(v[index], prime);
++index;
}
EXPECT_EQ(4, index);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/iterator_range.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/gtl/iterator_range_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ba4bb99e-6122-4932-bfa9-b7e6c0737093 | cpp | tensorflow/tensorflow | flatmap | tensorflow/core/lib/gtl/flatmap.h | third_party/xla/xla/tsl/lib/gtl/flatmap_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_FLATMAP_H_
#define TENSORFLOW_CORE_LIB_GTL_FLATMAP_H_
#include "xla/tsl/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatrep.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace gtl {
using tsl::gtl::FlatMap;
}
}
#endif | #include "xla/tsl/lib/gtl/flatmap.h"
#include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tsl/platform/hash.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace gtl {
namespace {
typedef FlatMap<int64_t, int32> NumMap;
int32 Get(const NumMap& map, int64_t k, int32_t def = -1) {
auto iter = map.find(k);
if (iter == map.end()) {
EXPECT_EQ(map.count(k), 0);
return def;
} else {
EXPECT_EQ(map.count(k), 1);
EXPECT_EQ(&map.at(k), &iter->second);
EXPECT_EQ(iter->first, k);
return iter->second;
}
}
typedef std::vector<std::pair<int64_t, int32>> NumMapContents;
NumMapContents Contents(const NumMap& map) {
NumMapContents result;
for (const auto& p : map) {
result.push_back({p.first, p.second});
}
std::sort(result.begin(), result.end());
return result;
}
void Fill(NumMap* map, int64_t start, int64_t limit) {
for (int64_t i = start; i < limit; i++) {
map->insert({i, i * 100});
}
}
TEST(FlatMapTest, Find) {
NumMap map;
EXPECT_EQ(Get(map, 1), -1);
map.insert({1, 100});
map.insert({2, 200});
EXPECT_EQ(Get(map, 1), 100);
EXPECT_EQ(Get(map, 2), 200);
EXPECT_EQ(Get(map, 3), -1);
}
TEST(FlatMapTest, Insert) {
NumMap map;
EXPECT_EQ(Get(map, 1), -1);
auto result = map.insert({1, 100});
EXPECT_TRUE(result.second);
EXPECT_EQ(result.first->first, 1);
EXPECT_EQ(result.first->second, 100);
EXPECT_EQ(Get(map, 1), 100);
result = map.insert({1, 200});
EXPECT_FALSE(result.second);
EXPECT_EQ(result.first->first, 1);
EXPECT_EQ(result.first->second, 100);
EXPECT_EQ(Get(map, 1), 100);
result.first->second = 300;
EXPECT_EQ(result.first->second, 300);
EXPECT_EQ(Get(map, 1), 300);
result = map.insert({1, 400});
EXPECT_FALSE(result.second);
EXPECT_EQ(result.first->first, 1);
EXPECT_EQ(result.first->second, 300);
EXPECT_EQ(Get(map, 1), 300);
}
TEST(FlatMapTest, InsertGrowth) {
NumMap map;
const int n = 100;
Fill(&map, 0, 100);
EXPECT_EQ(map.size(), n);
for (int i = 0; i < n; i++) {
EXPECT_EQ(Get(map, i), i * 100) << i;
}
}
TEST(FlatMapTest, Emplace) {
NumMap map;
auto result = map.emplace(1, 100);
EXPECT_TRUE(result.second);
EXPECT_EQ(result.first->first, 1);
EXPECT_EQ(result.first->second, 100);
EXPECT_EQ(Get(map, 1), 100);
result = map.emplace(1, 200);
EXPECT_FALSE(result.second);
EXPECT_EQ(result.first->first, 1);
EXPECT_EQ(result.first->second, 100);
EXPECT_EQ(Get(map, 1), 100);
result.first->second = 300;
EXPECT_EQ(result.first->second, 300);
EXPECT_EQ(Get(map, 1), 300);
result = map.emplace(2, 400);
EXPECT_TRUE(result.second);
EXPECT_EQ(result.first->first, 2);
EXPECT_EQ(result.first->second, 400);
EXPECT_EQ(Get(map, 2), 400);
}
TEST(FlatMapTest, EmplaceUniquePtr) {
FlatMap<int64_t, std::unique_ptr<string>> smap;
smap.emplace(1, std::make_unique<string>("hello"));
}
TEST(FlatMapTest, Size) {
NumMap map;
EXPECT_EQ(map.size(), 0);
map.insert({1, 100});
map.insert({2, 200});
EXPECT_EQ(map.size(), 2);
}
TEST(FlatMapTest, Empty) {
NumMap map;
EXPECT_TRUE(map.empty());
map.insert({1, 100});
map.insert({2, 200});
EXPECT_FALSE(map.empty());
}
TEST(FlatMapTest, ArrayOperator) {
NumMap map;
auto v1 = &map[1];
EXPECT_EQ(*v1, 0);
EXPECT_EQ(Get(map, 1), 0);
*v1 = 100;
EXPECT_EQ(map[1], 100);
EXPECT_EQ(Get(map, 1), 100);
auto v1a = &map[1];
EXPECT_EQ(v1, v1a);
EXPECT_EQ(*v1, 100);
map[2] = 200;
EXPECT_EQ(Get(map, 1), 100);
EXPECT_EQ(Get(map, 2), 200);
}
TEST(FlatMapTest, Count) {
NumMap map;
EXPECT_EQ(map.count(1), 0);
EXPECT_EQ(map.count(2), 0);
map.insert({1, 100});
EXPECT_EQ(map.count(1), 1);
EXPECT_EQ(map.count(2), 0);
map.insert({2, 200});
EXPECT_EQ(map.count(1), 1);
EXPECT_EQ(map.count(2), 1);
}
TEST(FlatMapTest, Iter) {
NumMap map;
EXPECT_EQ(Contents(map), NumMapContents());
map.insert({1, 100});
map.insert({2, 200});
EXPECT_EQ(Contents(map), NumMapContents({{1, 100}, {2, 200}}));
}
TEST(FlatMapTest, Erase) {
NumMap map;
EXPECT_EQ(map.erase(1), 0);
map[1] = 100;
map[2] = 200;
EXPECT_EQ(map.erase(3), 0);
EXPECT_EQ(map.erase(1), 1);
EXPECT_EQ(map.size(), 1);
EXPECT_EQ(Get(map, 2), 200);
EXPECT_EQ(Contents(map), NumMapContents({{2, 200}}));
EXPECT_EQ(map.erase(2), 1);
EXPECT_EQ(Contents(map), NumMapContents());
}
TEST(FlatMapTest, EraseIter) {
NumMap map;
Fill(&map, 1, 11);
size_t size = 10;
for (auto iter = map.begin(); iter != map.end();) {
iter = map.erase(iter);
size--;
EXPECT_EQ(map.size(), size);
}
EXPECT_EQ(Contents(map), NumMapContents());
}
TEST(FlatMapTest, EraseIterPair) {
NumMap map;
Fill(&map, 1, 11);
NumMap expected;
auto p1 = map.begin();
expected.insert(*p1);
++p1;
expected.insert(*p1);
++p1;
auto p2 = map.end();
EXPECT_EQ(map.erase(p1, p2), map.end());
EXPECT_EQ(map.size(), 2);
EXPECT_EQ(Contents(map), Contents(expected));
}
TEST(FlatMapTest, EraseLongChains) {
NumMap map;
const int num = 128;
Fill(&map, 0, num);
for (int i = 0; i < num; i += 3) {
EXPECT_EQ(map.erase(i), 1);
}
for (int i = 0; i < num; i++) {
if ((i % 3) != 0) {
EXPECT_EQ(Get(map, i), i * 100);
} else {
EXPECT_EQ(map.count(i), 0);
}
}
const size_t orig_buckets = map.bucket_count();
for (int i = 0; i < num; i++) {
map.erase(i);
}
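// Erasing every element leaves the bucket array at its grown size; the next
// insert is expected to shrink it back down.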
EXPECT_TRUE(map.empty());
EXPECT_EQ(map.bucket_count(), orig_buckets);
map[1] = 100;
EXPECT_LT(map.bucket_count(), orig_buckets);
}
TEST(FlatMap, AlternatingInsertRemove) {
NumMap map;
map.insert({1000, 1000});
map.insert({2000, 1000});
map.insert({3000, 1000});
for (int i = 0; i < 10000; i++) {
map.insert({i, i});
map.erase(i);
}
}
TEST(FlatMap, ClearNoResize) {
NumMap map;
Fill(&map, 0, 100);
const size_t orig = map.bucket_count();
map.clear_no_resize();
EXPECT_EQ(map.size(), 0);
EXPECT_EQ(Contents(map), NumMapContents());
EXPECT_EQ(map.bucket_count(), orig);
}
TEST(FlatMap, Clear) {
NumMap map;
Fill(&map, 0, 100);
const size_t orig = map.bucket_count();
map.clear();
EXPECT_EQ(map.size(), 0);
EXPECT_EQ(Contents(map), NumMapContents());
EXPECT_LT(map.bucket_count(), orig);
}
TEST(FlatMap, Copy) {
for (int n = 0; n < 10; n++) {
NumMap src;
Fill(&src, 0, n);
NumMap copy = src;
EXPECT_EQ(Contents(src), Contents(copy));
NumMap copy2;
copy2 = src;
EXPECT_EQ(Contents(src), Contents(copy2));
copy2 = *&copy2;
EXPECT_EQ(Contents(src), Contents(copy2));
}
}
TEST(FlatMap, InitFromIter) {
for (int n = 0; n < 10; n++) {
NumMap src;
Fill(&src, 0, n);
auto vec = Contents(src);
NumMap dst(vec.begin(), vec.end());
EXPECT_EQ(Contents(dst), vec);
}
}
TEST(FlatMap, InitializerList) {
NumMap a{{1, 10}, {2, 20}, {3, 30}};
NumMap b({{1, 10}, {2, 20}, {3, 30}});
NumMap c = {{1, 10}, {2, 20}, {3, 30}};
typedef std::unordered_map<int64_t, int32> StdNumMap;
StdNumMap std({{1, 10}, {2, 20}, {3, 30}});
StdNumMap::value_type std_r1 = *std.find(1);
StdNumMap::value_type std_r2 = *std.find(2);
StdNumMap::value_type std_r3 = *std.find(3);
NumMap d{std_r1, std_r2, std_r3};
NumMap e({std_r1, std_r2, std_r3});
NumMap f = {std_r1, std_r2, std_r3};
for (NumMap* map : std::vector<NumMap*>({&a, &b, &c, &d, &e, &f})) {
EXPECT_EQ(Get(*map, 1), 10);
EXPECT_EQ(Get(*map, 2), 20);
EXPECT_EQ(Get(*map, 3), 30);
EXPECT_EQ(Contents(*map), NumMapContents({{1, 10}, {2, 20}, {3, 30}}));
}
}
TEST(FlatMap, InsertIter) {
NumMap a, b;
Fill(&a, 1, 10);
Fill(&b, 8, 20);
b[9] = 10000;
a.insert(b.begin(), b.end());
NumMap expected;
Fill(&expected, 1, 20);
EXPECT_EQ(Contents(a), Contents(expected));
}
TEST(FlatMap, Eq) {
NumMap empty;
NumMap elems;
Fill(&elems, 0, 5);
EXPECT_FALSE(empty == elems);
EXPECT_TRUE(empty != elems);
NumMap copy = elems;
EXPECT_TRUE(copy == elems);
EXPECT_FALSE(copy != elems);
NumMap changed = elems;
changed[3] = 1;
EXPECT_FALSE(changed == elems);
EXPECT_TRUE(changed != elems);
NumMap changed2 = elems;
changed2.erase(3);
EXPECT_FALSE(changed2 == elems);
EXPECT_TRUE(changed2 != elems);
}
TEST(FlatMap, Swap) {
NumMap a, b;
Fill(&a, 1, 5);
Fill(&b, 100, 200);
NumMap c = a;
NumMap d = b;
EXPECT_EQ(c, a);
EXPECT_EQ(d, b);
c.swap(d);
EXPECT_EQ(c, b);
EXPECT_EQ(d, a);
}
TEST(FlatMap, Reserve) {
NumMap src;
Fill(&src, 1, 100);
NumMap a = src;
a.reserve(10);
EXPECT_EQ(a, src);
NumMap b = src;
b.rehash(1000);
EXPECT_EQ(b, src);
}
TEST(FlatMap, EqualRangeMutable) {
NumMap map;
Fill(&map, 1, 10);
auto p1 = map.equal_range(3);
EXPECT_TRUE(p1.first != p1.second);
EXPECT_EQ(p1.first->first, 3);
EXPECT_EQ(p1.first->second, 300);
++p1.first;
EXPECT_TRUE(p1.first == p1.second);
auto p2 = map.equal_range(100);
EXPECT_TRUE(p2.first == p2.second);
}
TEST(FlatMap, EqualRangeConst) {
NumMap tmp;
Fill(&tmp, 1, 10);
const NumMap map = tmp;
auto p1 = map.equal_range(3);
EXPECT_TRUE(p1.first != p1.second);
EXPECT_EQ(p1.first->first, 3);
EXPECT_EQ(p1.first->second, 300);
++p1.first;
EXPECT_TRUE(p1.first == p1.second);
auto p2 = map.equal_range(100);
EXPECT_TRUE(p2.first == p2.second);
}
TEST(FlatMap, Prefetch) {
NumMap map;
Fill(&map, 0, 1000);
for (int i = 0; i < 2000; i++) {
map.prefetch_value(i);
}
}
struct NA {
int64_t value;
NA() : value(-1) {}
explicit NA(int64_t v) : value(v) {}
NA(const NA& x) : value(x.value) {}
bool operator==(const NA& x) const { return value == x.value; }
};
struct HashNA {
size_t operator()(NA x) const { return x.value; }
};
TEST(FlatMap, NonAssignable) {
FlatMap<NA, NA, HashNA> map;
for (int i = 0; i < 100; i++) {
map[NA(i)] = NA(i * 100);
}
for (int i = 0; i < 100; i++) {
EXPECT_EQ(map.count(NA(i)), 1);
auto iter = map.find(NA(i));
EXPECT_NE(iter, map.end());
EXPECT_EQ(iter->first, NA(i));
EXPECT_EQ(iter->second, NA(i * 100));
EXPECT_EQ(map[NA(i)], NA(i * 100));
}
map.erase(NA(10));
EXPECT_EQ(map.count(NA(10)), 0);
}
TEST(FlatMap, ForwardIterator) {
typedef FlatMap<NA, NA, HashNA> NAMap;
NAMap map({{NA(1), NA(10)}, {NA(2), NA(20)}});
NAMap::iterator it1 = map.find(NA(1));
NAMap::iterator it2 = map.find(NA(2));
EXPECT_TRUE(it1 != map.end());
EXPECT_TRUE(it2 != map.end());
EXPECT_FALSE(it1 == map.end());
EXPECT_FALSE(it2 == map.end());
EXPECT_TRUE(it1 != it2);
EXPECT_FALSE(it1 == it2);
EXPECT_EQ((*it1).first, NA(1));
EXPECT_EQ((*it1).second, NA(10));
EXPECT_EQ((*it2).first, NA(2));
EXPECT_EQ((*it2).second, NA(20));
EXPECT_EQ(it1->first, NA(1));
EXPECT_EQ(it1->second, NA(10));
EXPECT_EQ(it2->first, NA(2));
EXPECT_EQ(it2->second, NA(20));
NAMap::iterator copy_it1 = it1;
NAMap::iterator copy_it2 = it2;
EXPECT_EQ(copy_it1->first, NA(1));
EXPECT_EQ(copy_it1->second, NA(10));
EXPECT_EQ(copy_it2->first, NA(2));
EXPECT_EQ(copy_it2->second, NA(20));
NAMap::iterator& pp_copy_it1 = ++copy_it1;
NAMap::iterator& pp_copy_it2 = ++copy_it2;
EXPECT_TRUE(pp_copy_it1 == copy_it1);
EXPECT_TRUE(pp_copy_it2 == copy_it2);
EXPECT_TRUE(copy_it1 != it1);
EXPECT_TRUE(copy_it2 != it2);
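// Iteration order is unspecified, so after one increment exactly one of the two
// iterators must have reached end(); the branches below cover both orders.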
if (copy_it1 == map.end()) {
EXPECT_TRUE(copy_it2 != map.end());
EXPECT_EQ(copy_it2->first, NA(1));
EXPECT_EQ(copy_it2->second, NA(10));
EXPECT_EQ(pp_copy_it2->first, NA(1));
EXPECT_EQ(pp_copy_it2->second, NA(10));
} else {
EXPECT_TRUE(copy_it2 == map.end());
EXPECT_EQ(copy_it1->first, NA(2));
EXPECT_EQ(copy_it1->second, NA(20));
EXPECT_EQ(pp_copy_it1->first, NA(2));
EXPECT_EQ(pp_copy_it1->second, NA(20));
}
EXPECT_EQ(it1->first, NA(1));
EXPECT_EQ(it1->second, NA(10));
EXPECT_EQ(it2->first, NA(2));
EXPECT_EQ(it2->second, NA(20));
copy_it1 = it1;
copy_it2 = it2;
EXPECT_EQ(copy_it1->first, NA(1));
EXPECT_EQ(copy_it1->second, NA(10));
EXPECT_EQ(copy_it2->first, NA(2));
EXPECT_EQ(copy_it2->second, NA(20));
NAMap::iterator copy_it1_pp = copy_it1++;
NAMap::iterator copy_it2_pp = copy_it2++;
EXPECT_TRUE(copy_it1_pp != copy_it1);
EXPECT_TRUE(copy_it2_pp != copy_it2);
EXPECT_TRUE(copy_it1_pp == it1);
EXPECT_TRUE(copy_it2_pp == it2);
EXPECT_EQ(copy_it1_pp->first, NA(1));
EXPECT_EQ(copy_it1_pp->second, NA(10));
EXPECT_EQ(copy_it2_pp->first, NA(2));
EXPECT_EQ(copy_it2_pp->second, NA(20));
EXPECT_TRUE(copy_it1 != it1);
EXPECT_TRUE(copy_it2 != it2);
if (copy_it1 == map.end()) {
EXPECT_TRUE(copy_it2 != map.end());
EXPECT_EQ(copy_it2->first, NA(1));
EXPECT_EQ(copy_it2->second, NA(10));
} else {
EXPECT_TRUE(copy_it2 == map.end());
EXPECT_EQ(copy_it1->first, NA(2));
EXPECT_EQ(copy_it1->second, NA(20));
}
EXPECT_EQ(it1->first, NA(1));
EXPECT_EQ(it1->second, NA(10));
EXPECT_EQ(it2->first, NA(2));
EXPECT_EQ(it2->second, NA(20));
}
TEST(FlatMap, ConstructDestruct) {
FlatMap<string, string> map;
string k1 = "the quick brown fox jumped over the lazy dog";
string k2 = k1 + k1;
string k3 = k1 + k2;
map[k1] = k2;
map[k3] = k1;
EXPECT_EQ(k1, map.find(k1)->first);
EXPECT_EQ(k2, map.find(k1)->second);
EXPECT_EQ(k1, map[k3]);
map.erase(k3);
EXPECT_EQ(string(), map[k3]);
map.clear();
map[k1] = k2;
EXPECT_EQ(k2, map[k1]);
map.reserve(100);
EXPECT_EQ(k2, map[k1]);
}
struct CustomCmpKey {
int64_t a;
int64_t b;
CustomCmpKey(int64_t v1, int64_t v2) : a(v1), b(v2) {}
bool operator==(const CustomCmpKey& x) const { return a == x.a && b == x.b; }
};
struct HashA {
size_t operator()(CustomCmpKey x) const { return x.a; }
};
struct EqA {
bool operator()(CustomCmpKey x, CustomCmpKey y) const { return x.a == y.a; }
};
TEST(FlatMap, CustomCmp) {
FlatMap<CustomCmpKey, int, HashA, EqA> map;
map[CustomCmpKey(100, 200)] = 300;
EXPECT_EQ(300, map[CustomCmpKey(100, 200)]);
EXPECT_EQ(300, map[CustomCmpKey(100, 500)]);
}
typedef std::unique_ptr<int> UniqInt;
static UniqInt MakeUniq(int i) { return std::make_unique<int>(i); }
struct HashUniq {
size_t operator()(const UniqInt& p) const { return *p; }
};
struct EqUniq {
bool operator()(const UniqInt& a, const UniqInt& b) const { return *a == *b; }
};
typedef FlatMap<UniqInt, UniqInt, HashUniq, EqUniq> UniqMap;
TEST(FlatMap, UniqueMap) {
UniqMap map;
const int N = 10;
for (int i = 0; i < N; i++) {
if ((i % 2) == 0) {
map[MakeUniq(i)] = MakeUniq(i + 100);
} else {
map.emplace(MakeUniq(i), MakeUniq(i + 100));
}
}
EXPECT_EQ(map.size(), N);
UniqMap map2(std::move(map));
for (int i = 0; i < N; i++) {
EXPECT_EQ(*map2.at(MakeUniq(i)), i + 100);
}
UniqMap map3;
map3 = std::move(map2);
EXPECT_EQ(map3.count(MakeUniq(2)), 1);
map3.erase(MakeUniq(2));
EXPECT_EQ(map3.count(MakeUniq(2)), 0);
map3.clear();
EXPECT_EQ(map3.size(), 0);
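// The moved-from maps must remain in a valid (if unspecified) state, so querying
// size() and inserting into them should still be safe.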
EXPECT_GE(map.size(), 0);
EXPECT_GE(map2.size(), 0);
EXPECT_TRUE(map.emplace(MakeUniq(-1), MakeUniq(-1)).second);
}
TEST(FlatMap, UniqueMapIter) {
UniqMap map;
const int kCount = 10;
const int kValueDelta = 100;
for (int i = 1; i <= kCount; i++) {
map[MakeUniq(i)] = MakeUniq(i + kValueDelta);
}
int key_sum = 0;
int val_sum = 0;
for (const auto& p : map) {
key_sum += *p.first;
val_sum += *p.second;
}
EXPECT_EQ(key_sum, (kCount * (kCount + 1)) / 2);
EXPECT_EQ(val_sum, key_sum + (kCount * kValueDelta));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/flatmap.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/gtl/flatmap_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b8debb4c-34f9-42e0-8dc5-dcfb180414bb | cpp | tensorflow/tensorflow | flatset | tensorflow/core/lib/gtl/flatset.h | third_party/xla/xla/tsl/lib/gtl/flatset_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_FLATSET_H_
#define TENSORFLOW_CORE_LIB_GTL_FLATSET_H_
#include "xla/tsl/lib/gtl/flatset.h"
namespace tensorflow {
namespace gtl {
using tsl::gtl::FlatSet;
}
}
#endif | #include "xla/tsl/lib/gtl/flatset.h"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "tsl/platform/hash.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace gtl {
namespace {
typedef FlatSet<int64_t> NumSet;
bool Has(const NumSet& set, int64_t k) {
auto iter = set.find(k);
if (iter == set.end()) {
EXPECT_EQ(set.count(k), 0);
return false;
} else {
EXPECT_EQ(set.count(k), 1);
EXPECT_EQ(*iter, k);
return true;
}
}
typedef std::vector<int64_t> NumSetContents;
NumSetContents Contents(const NumSet& set) {
NumSetContents result(set.begin(), set.end());
std::sort(result.begin(), result.end());
return result;
}
void Fill(NumSet* set, int64_t start, int64_t limit) {
for (int64_t i = start; i < limit; i++) {
set->insert(i);
}
}
TEST(FlatSetTest, Find) {
NumSet set;
EXPECT_FALSE(Has(set, 1));
set.insert(1);
set.insert(2);
EXPECT_TRUE(Has(set, 1));
EXPECT_TRUE(Has(set, 2));
EXPECT_FALSE(Has(set, 3));
}
TEST(FlatSetTest, Insert) {
NumSet set;
EXPECT_FALSE(Has(set, 1));
auto result = set.insert(1);
EXPECT_TRUE(result.second);
EXPECT_EQ(*result.first, 1);
EXPECT_TRUE(Has(set, 1));
result = set.insert(1);
EXPECT_FALSE(result.second);
EXPECT_EQ(*result.first, 1);
EXPECT_TRUE(Has(set, 1));
}
TEST(FlatSetTest, InsertGrowth) {
NumSet set;
const int n = 100;
Fill(&set, 0, 100);
EXPECT_EQ(set.size(), n);
for (int i = 0; i < n; i++) {
EXPECT_TRUE(Has(set, i)) << i;
}
}
TEST(FlatSetTest, Emplace) {
NumSet set;
auto result = set.emplace(73);
EXPECT_TRUE(result.second);
EXPECT_EQ(*result.first, 73);
EXPECT_TRUE(Has(set, 73));
result = set.emplace(73);
EXPECT_FALSE(result.second);
EXPECT_EQ(*result.first, 73);
EXPECT_TRUE(Has(set, 73));
result = set.emplace(103);
EXPECT_TRUE(result.second);
EXPECT_EQ(*result.first, 103);
EXPECT_TRUE(Has(set, 103));
}
TEST(FlatSetTest, Size) {
NumSet set;
EXPECT_EQ(set.size(), 0);
set.insert(1);
set.insert(2);
EXPECT_EQ(set.size(), 2);
}
TEST(FlatSetTest, Empty) {
NumSet set;
EXPECT_TRUE(set.empty());
set.insert(1);
set.insert(2);
EXPECT_FALSE(set.empty());
}
TEST(FlatSetTest, Count) {
NumSet set;
EXPECT_EQ(set.count(1), 0);
EXPECT_EQ(set.count(2), 0);
set.insert(1);
EXPECT_EQ(set.count(1), 1);
EXPECT_EQ(set.count(2), 0);
set.insert(2);
EXPECT_EQ(set.count(1), 1);
EXPECT_EQ(set.count(2), 1);
}
TEST(FlatSetTest, Iter) {
NumSet set;
EXPECT_EQ(Contents(set), NumSetContents());
set.insert(1);
set.insert(2);
EXPECT_EQ(Contents(set), NumSetContents({1, 2}));
}
TEST(FlatSetTest, Erase) {
NumSet set;
EXPECT_EQ(set.erase(1), 0);
set.insert(1);
set.insert(2);
EXPECT_EQ(set.erase(3), 0);
EXPECT_EQ(set.erase(1), 1);
EXPECT_EQ(set.size(), 1);
EXPECT_TRUE(Has(set, 2));
EXPECT_EQ(Contents(set), NumSetContents({2}));
EXPECT_EQ(set.erase(2), 1);
EXPECT_EQ(Contents(set), NumSetContents());
}
TEST(FlatSetTest, EraseIter) {
NumSet set;
Fill(&set, 1, 11);
size_t size = 10;
for (auto iter = set.begin(); iter != set.end();) {
iter = set.erase(iter);
size--;
EXPECT_EQ(set.size(), size);
}
EXPECT_EQ(Contents(set), NumSetContents());
}
TEST(FlatSetTest, EraseIterPair) {
NumSet set;
Fill(&set, 1, 11);
NumSet expected;
auto p1 = set.begin();
expected.insert(*p1);
++p1;
expected.insert(*p1);
++p1;
auto p2 = set.end();
EXPECT_EQ(set.erase(p1, p2), set.end());
EXPECT_EQ(set.size(), 2);
EXPECT_EQ(Contents(set), Contents(expected));
}
TEST(FlatSetTest, EraseLongChains) {
NumSet set;
const int num = 128;
Fill(&set, 0, num);
for (int i = 0; i < num; i += 3) {
EXPECT_EQ(set.erase(i), 1);
}
for (int i = 0; i < num; i++) {
EXPECT_EQ(Has(set, i), ((i % 3) != 0)) << i;
}
const size_t orig_buckets = set.bucket_count();
for (int i = 0; i < num; i++) {
set.erase(i);
}
EXPECT_TRUE(set.empty());
EXPECT_EQ(set.bucket_count(), orig_buckets);
set.insert(1);
EXPECT_LT(set.bucket_count(), orig_buckets);
}
TEST(FlatSet, ClearNoResize) {
NumSet set;
Fill(&set, 0, 100);
const size_t orig = set.bucket_count();
set.clear_no_resize();
EXPECT_EQ(set.size(), 0);
EXPECT_EQ(Contents(set), NumSetContents());
EXPECT_EQ(set.bucket_count(), orig);
}
TEST(FlatSet, Clear) {
NumSet set;
Fill(&set, 0, 100);
const size_t orig = set.bucket_count();
set.clear();
EXPECT_EQ(set.size(), 0);
EXPECT_EQ(Contents(set), NumSetContents());
EXPECT_LT(set.bucket_count(), orig);
}
TEST(FlatSet, Copy) {
for (int n = 0; n < 10; n++) {
NumSet src;
Fill(&src, 0, n);
NumSet copy = src;
EXPECT_EQ(Contents(src), Contents(copy));
NumSet copy2;
copy2 = src;
EXPECT_EQ(Contents(src), Contents(copy2));
copy2 = *&copy2;
EXPECT_EQ(Contents(src), Contents(copy2));
}
}
TEST(FlatSet, InitFromIter) {
for (int n = 0; n < 10; n++) {
NumSet src;
Fill(&src, 0, n);
auto vec = Contents(src);
NumSet dst(vec.begin(), vec.end());
EXPECT_EQ(Contents(dst), vec);
}
}
TEST(FlatSet, InitializerList) {
NumSet a{1, 2, 3};
NumSet b({1, 2, 3});
NumSet c = {1, 2, 3};
for (NumSet* set : std::vector<NumSet*>({&a, &b, &c})) {
EXPECT_TRUE(Has(*set, 1));
EXPECT_TRUE(Has(*set, 2));
EXPECT_TRUE(Has(*set, 3));
EXPECT_EQ(Contents(*set), NumSetContents({1, 2, 3}));
}
}
TEST(FlatSet, InsertIter) {
NumSet a, b;
Fill(&a, 1, 10);
Fill(&b, 8, 20);
b.insert(9);
a.insert(b.begin(), b.end());
NumSet expected;
Fill(&expected, 1, 20);
EXPECT_EQ(Contents(a), Contents(expected));
}
TEST(FlatSet, Eq) {
NumSet empty;
NumSet elems;
Fill(&elems, 0, 5);
EXPECT_FALSE(empty == elems);
EXPECT_TRUE(empty != elems);
NumSet copy = elems;
EXPECT_TRUE(copy == elems);
EXPECT_FALSE(copy != elems);
NumSet changed = elems;
changed.insert(7);
EXPECT_FALSE(changed == elems);
EXPECT_TRUE(changed != elems);
NumSet changed2 = elems;
changed2.erase(3);
EXPECT_FALSE(changed2 == elems);
EXPECT_TRUE(changed2 != elems);
}
TEST(FlatSet, Swap) {
NumSet a, b;
Fill(&a, 1, 5);
Fill(&b, 100, 200);
NumSet c = a;
NumSet d = b;
EXPECT_EQ(c, a);
EXPECT_EQ(d, b);
c.swap(d);
EXPECT_EQ(c, b);
EXPECT_EQ(d, a);
}
TEST(FlatSet, Reserve) {
NumSet src;
Fill(&src, 1, 100);
NumSet a = src;
a.reserve(10);
EXPECT_EQ(a, src);
NumSet b = src;
b.rehash(1000);
EXPECT_EQ(b, src);
}
TEST(FlatSet, EqualRangeMutable) {
NumSet set;
Fill(&set, 1, 10);
auto p1 = set.equal_range(3);
EXPECT_TRUE(p1.first != p1.second);
EXPECT_EQ(*p1.first, 3);
++p1.first;
EXPECT_TRUE(p1.first == p1.second);
auto p2 = set.equal_range(100);
EXPECT_TRUE(p2.first == p2.second);
}
TEST(FlatSet, EqualRangeConst) {
NumSet tmp;
Fill(&tmp, 1, 10);
const NumSet set = tmp;
auto p1 = set.equal_range(3);
EXPECT_TRUE(p1.first != p1.second);
EXPECT_EQ(*p1.first, 3);
++p1.first;
EXPECT_TRUE(p1.first == p1.second);
auto p2 = set.equal_range(100);
EXPECT_TRUE(p2.first == p2.second);
}
TEST(FlatSet, Prefetch) {
NumSet set;
Fill(&set, 0, 1000);
for (int i = 0; i < 2000; i++) {
set.prefetch_value(i);
}
}
struct NA {
int64_t value;
NA() : value(-1) {}
explicit NA(int64_t v) : value(v) {}
NA(const NA& x) : value(x.value) {}
bool operator==(const NA& x) const { return value == x.value; }
};
struct HashNA {
size_t operator()(NA x) const { return x.value; }
};
TEST(FlatSet, NonAssignable) {
FlatSet<NA, HashNA> set;
for (int i = 0; i < 100; i++) {
set.insert(NA(i));
}
for (int i = 0; i < 100; i++) {
EXPECT_EQ(set.count(NA(i)), 1);
auto iter = set.find(NA(i));
EXPECT_NE(iter, set.end());
EXPECT_EQ(*iter, NA(i));
}
set.erase(NA(10));
EXPECT_EQ(set.count(NA(10)), 0);
}
TEST(FlatSet, ForwardIterator) {
typedef FlatSet<NA, HashNA> NASet;
NASet set({NA(1), NA(2)});
NASet::iterator it1 = set.find(NA(1));
NASet::iterator it2 = set.find(NA(2));
EXPECT_TRUE(it1 != set.end());
EXPECT_TRUE(it2 != set.end());
EXPECT_FALSE(it1 == set.end());
EXPECT_FALSE(it2 == set.end());
EXPECT_TRUE(it1 != it2);
EXPECT_FALSE(it1 == it2);
EXPECT_EQ(*it1, NA(1));
EXPECT_EQ(*it2, NA(2));
EXPECT_EQ(it1->value, 1);
EXPECT_EQ(it2->value, 2);
NASet::iterator copy_it1 = it1;
NASet::iterator copy_it2 = it2;
EXPECT_EQ(*copy_it1, NA(1));
EXPECT_EQ(*copy_it2, NA(2));
NASet::iterator& pp_copy_it1 = ++copy_it1;
NASet::iterator& pp_copy_it2 = ++copy_it2;
EXPECT_TRUE(pp_copy_it1 == copy_it1);
EXPECT_TRUE(pp_copy_it2 == copy_it2);
EXPECT_TRUE(copy_it1 != it1);
EXPECT_TRUE(copy_it2 != it2);
if (copy_it1 == set.end()) {
EXPECT_TRUE(copy_it2 != set.end());
EXPECT_EQ(*copy_it2, NA(1));
EXPECT_EQ(*pp_copy_it2, NA(1));
} else {
EXPECT_TRUE(copy_it2 == set.end());
EXPECT_EQ(*copy_it1, NA(2));
EXPECT_EQ(*pp_copy_it1, NA(2));
}
EXPECT_EQ(*it1, NA(1));
EXPECT_EQ(*it2, NA(2));
copy_it1 = it1;
copy_it2 = it2;
EXPECT_EQ(*copy_it1, NA(1));
EXPECT_EQ(*copy_it2, NA(2));
NASet::iterator copy_it1_pp = copy_it1++;
NASet::iterator copy_it2_pp = copy_it2++;
EXPECT_TRUE(copy_it1_pp != copy_it1);
EXPECT_TRUE(copy_it2_pp != copy_it2);
EXPECT_TRUE(copy_it1_pp == it1);
EXPECT_TRUE(copy_it2_pp == it2);
EXPECT_EQ(*copy_it1_pp, NA(1));
EXPECT_EQ(*copy_it2_pp, NA(2));
EXPECT_TRUE(copy_it1 != it1);
EXPECT_TRUE(copy_it2 != it2);
if (copy_it1 == set.end()) {
EXPECT_TRUE(copy_it2 != set.end());
EXPECT_EQ(*copy_it2, NA(1));
} else {
EXPECT_TRUE(copy_it2 == set.end());
EXPECT_EQ(*copy_it1, NA(2));
}
EXPECT_EQ(*it1, NA(1));
EXPECT_EQ(*it2, NA(2));
}
TEST(FlatSet, ConstructDestruct) {
FlatSet<string> set;
string k1 = "the quick brown fox jumped over the lazy dog";
string k2 = k1 + k1;
string k3 = k1 + k2;
set.insert(k1);
set.insert(k3);
EXPECT_EQ(set.count(k1), 1);
EXPECT_EQ(set.count(k2), 0);
EXPECT_EQ(set.count(k3), 1);
set.erase(k3);
EXPECT_EQ(set.count(k3), 0);
set.clear();
set.insert(k1);
EXPECT_EQ(set.count(k1), 1);
EXPECT_EQ(set.count(k3), 0);
set.reserve(100);
EXPECT_EQ(set.count(k1), 1);
EXPECT_EQ(set.count(k3), 0);
}
struct CustomCmpKey {
int64_t a;
int64_t b;
CustomCmpKey(int64_t v1, int64_t v2) : a(v1), b(v2) {}
bool operator==(const CustomCmpKey& x) const { return a == x.a && b == x.b; }
};
struct HashA {
size_t operator()(CustomCmpKey x) const { return x.a; }
};
struct EqA {
bool operator()(CustomCmpKey x, CustomCmpKey y) const { return x.a == y.a; }
};
TEST(FlatSet, CustomCmp) {
FlatSet<CustomCmpKey, HashA, EqA> set;
set.insert(CustomCmpKey(100, 200));
EXPECT_EQ(set.count(CustomCmpKey(100, 200)), 1);
EXPECT_EQ(set.count(CustomCmpKey(100, 500)), 1);
}
typedef std::unique_ptr<int> UniqInt;
static UniqInt MakeUniq(int i) { return std::make_unique<int>(i); }
struct HashUniq {
size_t operator()(const UniqInt& p) const { return *p; }
};
struct EqUniq {
bool operator()(const UniqInt& a, const UniqInt& b) const { return *a == *b; }
};
typedef FlatSet<UniqInt, HashUniq, EqUniq> UniqSet;
TEST(FlatSet, UniqueSet) {
UniqSet set;
const int N = 10;
for (int i = 0; i < N; i++) {
set.emplace(MakeUniq(i));
}
EXPECT_EQ(set.size(), N);
UniqSet set2(std::move(set));
for (int i = 0; i < N; i++) {
EXPECT_EQ(set2.count(MakeUniq(i)), 1);
}
UniqSet set3;
set3 = std::move(set2);
set3.erase(MakeUniq(2));
EXPECT_EQ(set3.count(MakeUniq(2)), 0);
set.clear();
EXPECT_EQ(set.size(), 0);
EXPECT_GE(set.size(), 0);
EXPECT_GE(set2.size(), 0);
EXPECT_TRUE(set.emplace(MakeUniq(-1)).second);
}
TEST(FlatSet, UniqueSetIter) {
UniqSet set;
const int kCount = 10;
for (int i = 1; i <= kCount; i++) {
set.emplace(MakeUniq(i));
}
int sum = 0;
for (const auto& p : set) {
sum += *p;
}
EXPECT_EQ(sum, (kCount * (kCount + 1)) / 2);
}
TEST(FlatSet, InsertUncopyable) {
UniqSet set;
EXPECT_TRUE(set.insert(MakeUniq(0)).second);
EXPECT_EQ(set.size(), 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/flatset.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/gtl/flatset_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ceb9b732-936e-47e9-ba8d-ae0b7dd21e7e | cpp | tensorflow/tensorflow | compactptrset | tensorflow/core/lib/gtl/compactptrset.h | third_party/xla/xla/tsl/lib/gtl/compactptrset_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_COMPACTPTRSET_H_
#define TENSORFLOW_CORE_LIB_GTL_COMPACTPTRSET_H_
#include "xla/tsl/lib/gtl/compactptrset.h"
namespace tensorflow {
namespace gtl {
using ::tsl::gtl::CompactPointerSet;
}
}
#endif | #include "xla/tsl/lib/gtl/compactptrset.h"
#include "tsl/platform/hash.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace gtl {
namespace {
typedef CompactPointerSet<const char*> StringSet;
static std::vector<const char*> SortedContents(const StringSet& set) {
std::vector<const char*> contents(set.begin(), set.end());
std::sort(contents.begin(), contents.end());
return contents;
}
TEST(CompactPointerSetTest, Simple) {
string data = "ABCDEFG";
const char* a = &data[0];
const char* b = &data[1];
const char* c = &data[2];
const char* d = &data[3];
const char* e = &data[4];
const char* f = &data[5];
const char* g = &data[6];
for (const auto& list : std::vector<std::vector<const char*>>({{
{},
{a},
{b},
{nullptr},
{a, b, c, d, e, f, g},
}})) {
LOG(INFO) << list.size();
StringSet set;
ASSERT_TRUE(set.empty());
for (auto p : list) {
ASSERT_EQ(set.count(p), 0);
ASSERT_TRUE(set.insert(p).second);
ASSERT_EQ(set.count(p), 1);
ASSERT_TRUE(set.find(p) != set.end());
}
ASSERT_EQ(set.size(), list.size());
ASSERT_EQ(SortedContents(set), list);
{
StringSet set2(set);
ASSERT_EQ(SortedContents(set2), list);
}
for (const auto& initial : std::vector<std::vector<const char*>>({{
{},
{a},
{b},
{nullptr},
{a, b, c, d},
}})) {
StringSet dst;
for (auto p : initial) {
dst.insert(p);
}
ASSERT_EQ(dst.size(), initial.size());
dst = set;
ASSERT_EQ(SortedContents(dst), list);
dst.clear();
ASSERT_EQ(dst.size(), 0);
}
for (auto p : list) {
ASSERT_EQ(set.erase(p), 1);
ASSERT_EQ(set.erase(p), 0);
}
ASSERT_TRUE(set.empty());
ASSERT_EQ(set.size(), 0);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/compactptrset.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/gtl/compactptrset_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6288d502-0a37-48dc-8f80-6013bf3a35e7 | cpp | tensorflow/tensorflow | int_type | tensorflow/core/lib/gtl/int_type.h | third_party/xla/xla/tsl/lib/gtl/int_type_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_INT_TYPE_H_
#define TENSORFLOW_CORE_LIB_GTL_INT_TYPE_H_
#include "xla/tsl/lib/gtl/int_type.h"
namespace tensorflow {
namespace gtl {
using ::tsl::gtl::IntType;
}
}
#endif | #include "xla/tsl/lib/gtl/int_type.h"
#include <memory>
#include <unordered_map>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
TSL_LIB_GTL_DEFINE_INT_TYPE(Int8_IT, int8);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt8_IT, uint8);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int16_IT, int16);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt16_IT, uint16);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int32_IT, int32);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int64_IT, int64_t);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt32_IT, uint32);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt64_IT, uint64);
TSL_LIB_GTL_DEFINE_INT_TYPE(Long_IT, long);
template <typename IntType_Type>
class IntTypeTest : public ::testing::Test {};
typedef ::testing::Types<Int8_IT, UInt8_IT, Int16_IT, UInt16_IT, Int32_IT,
Int64_IT, UInt64_IT, Long_IT>
SupportedIntTypes;
TYPED_TEST_SUITE(IntTypeTest, SupportedIntTypes);
TYPED_TEST(IntTypeTest, TestInitialization) {
constexpr TypeParam a;
constexpr TypeParam b(1);
constexpr TypeParam c(b);
EXPECT_EQ(0, a);
EXPECT_EQ(1, b);
EXPECT_EQ(1, c);
}
TYPED_TEST(IntTypeTest, TestOperators) {
TypeParam a(0);
TypeParam b(1);
TypeParam c(2);
constexpr TypeParam d(3);
constexpr TypeParam e(4);
EXPECT_EQ(0, (a++).value());
EXPECT_EQ(2, (++a).value());
EXPECT_EQ(2, (a--).value());
EXPECT_EQ(0, (--a).value());
EXPECT_EQ(true, !a);
EXPECT_EQ(false, !b);
static_assert(!d == false, "Unary operator! failed");
EXPECT_EQ(a.value(), +a);
static_assert(+d == d.value(), "Unary operator+ failed");
EXPECT_EQ(-a.value(), -a);
static_assert(-d == -d.value(), "Unary operator- failed");
EXPECT_EQ(~a.value(), ~a);
EXPECT_EQ(~b.value(), ~b);
static_assert(~d == ~d.value(), "Unary operator~ failed");
c = a = b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = b = 2;
EXPECT_EQ(2, b.value());
EXPECT_EQ(2, c.value());
c = a += b;
EXPECT_EQ(3, a.value());
EXPECT_EQ(3, c.value());
c = a -= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a *= b;
EXPECT_EQ(2, a.value());
EXPECT_EQ(2, c.value());
c = a /= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a <<= b;
EXPECT_EQ(4, a.value());
EXPECT_EQ(4, c.value());
c = a >>= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a %= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a += 2;
EXPECT_EQ(3, a.value());
EXPECT_EQ(3, c.value());
c = a -= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a *= 2;
EXPECT_EQ(2, a.value());
EXPECT_EQ(2, c.value());
c = a /= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a <<= 2;
EXPECT_EQ(4, a.value());
EXPECT_EQ(4, c.value());
c = a >>= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a %= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
a = 0;
b = 1;
EXPECT_FALSE(a == b);
EXPECT_TRUE(a == 0);
EXPECT_FALSE(1 == a);
static_assert(d == d, "operator== failed");
static_assert(d == 3, "operator== failed");
static_assert(3 == d, "operator== failed");
EXPECT_TRUE(a != b);
EXPECT_TRUE(a != 1);
EXPECT_FALSE(0 != a);
static_assert(d != e, "operator!= failed");
static_assert(d != 4, "operator!= failed");
static_assert(4 != d, "operator!= failed");
EXPECT_TRUE(a < b);
EXPECT_TRUE(a < 1);
EXPECT_FALSE(0 < a);
static_assert(d < e, "operator< failed");
static_assert(d < 4, "operator< failed");
static_assert(3 < e, "operator< failed");
EXPECT_TRUE(a <= b);
EXPECT_TRUE(a <= 1);
EXPECT_TRUE(0 <= a);
static_assert(d <= e, "operator<= failed");
static_assert(d <= 4, "operator<= failed");
static_assert(3 <= e, "operator<= failed");
EXPECT_FALSE(a > b);
EXPECT_FALSE(a > 1);
EXPECT_FALSE(0 > a);
static_assert(e > d, "operator> failed");
static_assert(e > 3, "operator> failed");
static_assert(4 > d, "operator> failed");
EXPECT_FALSE(a >= b);
EXPECT_FALSE(a >= 1);
EXPECT_TRUE(0 >= a);
static_assert(e >= d, "operator>= failed");
static_assert(e >= 3, "operator>= failed");
static_assert(4 >= d, "operator>= failed");
a = 1;
b = 3;
EXPECT_EQ(4, (a + b).value());
EXPECT_EQ(4, (a + 3).value());
EXPECT_EQ(4, (1 + b).value());
static_assert((d + e).value() == 7, "Binary operator+ failed");
static_assert((d + 4).value() == 7, "Binary operator+ failed");
static_assert((3 + e).value() == 7, "Binary operator+ failed");
EXPECT_EQ(2, (b - a).value());
EXPECT_EQ(2, (b - 1).value());
EXPECT_EQ(2, (3 - a).value());
static_assert((e - d).value() == 1, "Binary operator- failed");
static_assert((e - 3).value() == 1, "Binary operator- failed");
static_assert((4 - d).value() == 1, "Binary operator- failed");
EXPECT_EQ(3, (a * b).value());
EXPECT_EQ(3, (a * 3).value());
EXPECT_EQ(3, (1 * b).value());
static_assert((d * e).value() == 12, "Binary operator* failed");
static_assert((d * 4).value() == 12, "Binary operator* failed");
static_assert((3 * e).value() == 12, "Binary operator* failed");
EXPECT_EQ(0, (a / b).value());
EXPECT_EQ(0, (a / 3).value());
EXPECT_EQ(0, (1 / b).value());
static_assert((d / e).value() == 0, "Binary operator/ failed");
static_assert((d / 4).value() == 0, "Binary operator/ failed");
static_assert((3 / e).value() == 0, "Binary operator/ failed");
EXPECT_EQ(8, (a << b).value());
EXPECT_EQ(8, (a << 3).value());
EXPECT_EQ(8, (1 << b).value());
static_assert((d << e).value() == 48, "Binary operator<< failed");
static_assert((d << 4).value() == 48, "Binary operator<< failed");
static_assert((3 << e).value() == 48, "Binary operator<< failed");
b = 8;
EXPECT_EQ(4, (b >> a).value());
EXPECT_EQ(4, (b >> 1).value());
EXPECT_EQ(4, (8 >> a).value());
static_assert((d >> e).value() == 0, "Binary operator>> failed");
static_assert((d >> 4).value() == 0, "Binary operator>> failed");
static_assert((3 >> e).value() == 0, "Binary operator>> failed");
b = 3;
a = 2;
EXPECT_EQ(1, (b % a).value());
EXPECT_EQ(1, (b % 2).value());
EXPECT_EQ(1, (3 % a).value());
static_assert((e % d).value() == 1, "Binary operator% failed");
static_assert((e % 3).value() == 1, "Binary operator% failed");
static_assert((4 % d).value() == 1, "Binary operator% failed");
}
TYPED_TEST(IntTypeTest, TestHashFunctor) {
std::unordered_map<TypeParam, char, typename TypeParam::Hasher> map;
TypeParam a(0);
map[a] = 'c';
EXPECT_EQ('c', map[a]);
map[++a] = 'o';
EXPECT_EQ('o', map[a]);
TypeParam b(a);
EXPECT_EQ(typename TypeParam::Hasher()(a), typename TypeParam::Hasher()(b));
}
TYPED_TEST(IntTypeTest, TestValueAccessor) {
constexpr typename TypeParam::ValueType i = -1;
constexpr TypeParam int_type(i);
EXPECT_EQ(i, int_type.value());
static_assert(int_type.value() == i, "value() failed");
EXPECT_EQ(static_cast<int>(i), int_type.template value<int>());
EXPECT_EQ(static_cast<int8>(i), int_type.template value<int8>());
EXPECT_EQ(static_cast<int16>(i), int_type.template value<int16>());
EXPECT_EQ(static_cast<int32>(i), int_type.template value<int32>());
EXPECT_EQ(static_cast<uint32>(i), int_type.template value<uint32>());
EXPECT_EQ(static_cast<int64_t>(i), int_type.template value<int64_t>());
EXPECT_EQ(static_cast<uint64>(i), int_type.template value<uint64>());
EXPECT_EQ(static_cast<long>(i), int_type.template value<long>());
static_assert(int_type.template value<int>() == static_cast<int>(i),
"value<Value>() failed");
}
TYPED_TEST(IntTypeTest, TestMove) {
struct NotCopyable {
TypeParam inttype;
std::unique_ptr<int> ptr;
static NotCopyable Make(int i) {
NotCopyable f;
f.inttype = TypeParam(i);
f.ptr.reset(new int(i));
return f;
}
};
NotCopyable foo = NotCopyable::Make(123);
EXPECT_EQ(123, foo.inttype);
EXPECT_EQ(123, *foo.ptr);
foo = NotCopyable::Make(321);
EXPECT_EQ(321, foo.inttype);
EXPECT_EQ(321, *foo.ptr);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/int_type.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/gtl/int_type_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
992ee0d8-2b56-4259-a01e-6a37f75f1b53 | cpp | tensorflow/tensorflow | philox_random | tensorflow/core/lib/random/philox_random.h | third_party/xla/xla/tsl/lib/random/philox_random_test.cc | #ifndef TENSORFLOW_CORE_LIB_RANDOM_PHILOX_RANDOM_H_
#define TENSORFLOW_CORE_LIB_RANDOM_PHILOX_RANDOM_H_
#include "xla/tsl/lib/random/philox_random.h"
namespace tensorflow {
namespace random {
using tsl::random::Array;
using tsl::random::PhiloxRandom;
}
}
#endif | #include "xla/tsl/lib/random/philox_random.h"
#include <math.h>
#include <algorithm>
#include <functional>
#include <unordered_map>
#include <vector>
#include "xla/tsl/lib/random/philox_random_test_utils.h"
#include "xla/tsl/lib/random/random_distributions.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/random.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace random {
namespace {
class TrivialPhiloxDistribution {
public:
static constexpr int kResultElementCount = PhiloxRandom::kResultElementCount;
typedef PhiloxRandom::ResultType ResultType;
typedef PhiloxRandom::ResultElementType ResultElementType;
PHILOX_DEVICE_INLINE
ResultType operator()(PhiloxRandom* gen) { return (*gen)(); }
};
TEST(PhiloxRandomTest, SkipMatchTest) {
constexpr int count = 1024;
constexpr int skip_count = 2048;
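// PhiloxRandom emits 4 uint32 outputs per sample, so skipping skip_count / 4
// samples should advance the stream by exactly skip_count values.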
uint64 test_seed = GetTestSeed();
std::vector<uint32> v1(count);
{
PhiloxRandom gen(test_seed);
gen.Skip(skip_count / 4);
FillRandoms<TrivialPhiloxDistribution>(gen, &v1[0], v1.size());
}
std::vector<uint32> v2(count + skip_count);
{
PhiloxRandom gen(test_seed);
FillRandoms<TrivialPhiloxDistribution>(gen, &v2[0], v2.size());
}
for (int i = 0; i < count; ++i) {
ASSERT_EQ(v1[i], v2[i + skip_count]);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/random/philox_random.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/philox_random_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2a66fc09-36a5-4a49-b77b-1dfda971ec7d | cpp | tensorflow/tensorflow | gauge | tensorflow/core/lib/monitoring/gauge.h | tensorflow/core/lib/monitoring/gauge_test.cc | #ifndef TENSORFLOW_CORE_LIB_MONITORING_GAUGE_H_
#define TENSORFLOW_CORE_LIB_MONITORING_GAUGE_H_
#include "xla/tsl/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/collection_registry.h"
#include "tensorflow/core/lib/monitoring/metric_def.h"
namespace tensorflow {
namespace monitoring {
using tsl::monitoring::Gauge;
using tsl::monitoring::GaugeCell;
}
}
#endif | #include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace monitoring {
namespace {
auto* gauge_with_labels = Gauge<int64_t, 1>::New(
"/tensorflow/test/gauge_with_labels", "Gauge with one label.", "MyLabel");
TEST(LabeledGaugeTest, InitializedWithZero) {
EXPECT_EQ(0, gauge_with_labels->GetCell("Empty")->value());
}
TEST(LabeledGaugeTest, GetCell) {
auto* cell = gauge_with_labels->GetCell("GetCellOp");
EXPECT_EQ(0, cell->value());
cell->Set(1);
EXPECT_EQ(1, cell->value());
auto* same_cell = gauge_with_labels->GetCell("GetCellOp");
EXPECT_EQ(1, same_cell->value());
same_cell->Set(10);
EXPECT_EQ(10, cell->value());
EXPECT_EQ(10, same_cell->value());
}
auto* gauge_without_labels = Gauge<int64_t, 0>::New(
"/tensorflow/test/gauge_without_labels", "Gauge without any labels.");
TEST(UnlabeledGaugeTest, InitializedWithZero) {
EXPECT_EQ(0, gauge_without_labels->GetCell()->value());
}
TEST(UnlabeledGaugeTest, GetCell) {
auto* cell = gauge_without_labels->GetCell();
EXPECT_EQ(0, cell->value());
cell->Set(1);
EXPECT_EQ(1, cell->value());
auto* same_cell = gauge_without_labels->GetCell();
EXPECT_EQ(1, same_cell->value());
same_cell->Set(10);
EXPECT_EQ(10, cell->value());
EXPECT_EQ(10, same_cell->value());
}
auto* string_gauge = Gauge<string, 0>::New("/tensorflow/test/string_gauge",
"Gauge of string value.");
TEST(GaugeOfStringValue, InitializedWithEmptyString) {
EXPECT_EQ("", string_gauge->GetCell()->value());
}
TEST(GaugeOfStringValue, GetCell) {
auto* cell = string_gauge->GetCell();
EXPECT_EQ("", cell->value());
cell->Set("foo");
EXPECT_EQ("foo", cell->value());
auto* same_cell = string_gauge->GetCell();
EXPECT_EQ("foo", cell->value());
same_cell->Set("bar");
EXPECT_EQ("bar", cell->value());
EXPECT_EQ("bar", same_cell->value());
}
auto* bool_gauge =
Gauge<bool, 0>::New("/tensorflow/test/bool_gauge", "Gauge of bool value.");
TEST(GaugeOfBoolValue, InitializedWithFalseValue) {
EXPECT_EQ(false, bool_gauge->GetCell()->value());
}
TEST(GaugeOfBoolValue, GetCell) {
auto* cell = bool_gauge->GetCell();
EXPECT_EQ(false, cell->value());
cell->Set(true);
EXPECT_EQ(true, cell->value());
auto* same_cell = bool_gauge->GetCell();
EXPECT_EQ(true, cell->value());
same_cell->Set(false);
EXPECT_EQ(false, cell->value());
EXPECT_EQ(false, same_cell->value());
}
TEST(LabeledGaugeTest, SameName) {
auto* same_gauge = Gauge<int64_t, 1>::New(
"/tensorflow/test/gauge_with_labels", "Gauge with one label.", "MyLabel");
EXPECT_TRUE(gauge_with_labels->GetStatus().ok());
EXPECT_TRUE(same_gauge->GetStatus().ok());
delete same_gauge;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/gauge.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/gauge_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce88d644-ba62-4366-b77d-45d407940f71 | cpp | tensorflow/tensorflow | cell_reader | tensorflow/core/lib/monitoring/cell_reader.h | tensorflow/core/lib/monitoring/cell_reader_test.cc | #ifndef TENSORFLOW_CORE_LIB_MONITORING_CELL_READER_H_
#define TENSORFLOW_CORE_LIB_MONITORING_CELL_READER_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/tsl/lib/monitoring/cell_reader.h"
#include "tensorflow/core/lib/monitoring/cell_reader-inl.h"
#include "tensorflow/core/lib/monitoring/collected_metrics.h"
#include "tensorflow/core/lib/monitoring/metric_def.h"
namespace tensorflow {
namespace monitoring {
namespace testing {
using tsl::monitoring::testing::CellReader;
}
}
}
#endif | #include "tensorflow/core/lib/monitoring/cell_reader.h"
#include <cstdint>
#include <string>
#include <vector>
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/percentile_sampler.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/lib/monitoring/test_utils.h"
#include "tensorflow/core/lib/monitoring/types.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace monitoring {
namespace testing {
namespace {
std::vector<double> GetDefaultPercentiles() {
return {25.0, 50.0, 80.0, 90.0, 95.0, 99.0};
}
auto* test_counter = monitoring::Counter<0>::New(
"/tensorflow/monitoring/test/counter", "Test counter.");
auto* test_counter_with_labels = monitoring::Counter<2>::New(
"/tensorflow/monitoring/test/counter_with_labels",
"Test counter with two labels.", "label1", "label2");
auto* test_sampler = monitoring::Sampler<0>::New(
{"/tensorflow/monitoring/test/sampler", "Test sampler."},
monitoring::Buckets::Explicit(
{0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0}));
auto* test_sampler_with_labels = monitoring::Sampler<2>::New(
{"/tensorflow/monitoring/test/sampler_with_labels", "Test sampler.",
"label1", "label2"},
monitoring::Buckets::Exponential(
1, 10, 5));
auto* test_int_gauge = monitoring::Gauge<int64_t, 0>::New(
"/tensorflow/monitoring/test/int_gauge", "Test gauge.");
auto* test_int_gauge_with_labels = monitoring::Gauge<int64_t, 2>::New(
"/tensorflow/monitoring/test/int_gauge_with_labels", "Test gauge.",
"label1", "label2");
auto* test_string_gauge = monitoring::Gauge<std::string, 0>::New(
"/tensorflow/monitoring/test/string_gauge", "Test gauge.");
auto* test_string_gauge_with_labels = monitoring::Gauge<std::string, 2>::New(
"/tensorflow/monitoring/test/string_gauge_with_labels", "Test gauge.",
"label1", "label2");
auto* test_bool_gauge = monitoring::Gauge<bool, 0>::New(
"/tensorflow/monitoring/test/bool_gauge", "Test gauge.");
auto* test_bool_gauge_with_labels = monitoring::Gauge<bool, 2>::New(
"/tensorflow/monitoring/test/bool_gauge_with_labels", "Test gauge.",
"label1", "label2");
auto* test_percentiles = monitoring::PercentileSampler<0>::New(
{"/tensorflow/monitoring/test/percentiles", "Test percentiles."},
GetDefaultPercentiles(), 1024,
monitoring::UnitOfMeasure::kTime);
auto* test_percentiles_with_labels = monitoring::PercentileSampler<2>::New(
{"/tensorflow/monitoring/test/percentiles_with_labels", "Test percentiles.",
"label1", "label2"},
GetDefaultPercentiles(), 1024,
monitoring::UnitOfMeasure::kTime);
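// Delta() returns the change in a cell since the previous Delta() call, while
// Read() returns the running total observed by this reader.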
TEST(CellReaderTest, CounterDeltaNoLabels) {
CellReader<int64_t> cell_reader("/tensorflow/monitoring/test/counter");
EXPECT_EQ(cell_reader.Delta(), 0);
test_counter->GetCell()->IncrementBy(5);
EXPECT_EQ(cell_reader.Delta(), 5);
test_counter->GetCell()->IncrementBy(10);
EXPECT_EQ(cell_reader.Delta(), 10);
test_counter->GetCell()->IncrementBy(100);
EXPECT_EQ(cell_reader.Delta(), 100);
}
TEST(CellReaderTest, CounterReadNoLabels) {
CellReader<int64_t> cell_reader("/tensorflow/monitoring/test/counter");
EXPECT_EQ(cell_reader.Read(), 0);
test_counter->GetCell()->IncrementBy(5);
EXPECT_EQ(cell_reader.Read(), 5);
test_counter->GetCell()->IncrementBy(10);
EXPECT_EQ(cell_reader.Read(), 15);
test_counter->GetCell()->IncrementBy(100);
EXPECT_EQ(cell_reader.Read(), 115);
}
TEST(CellReaderTest, CounterDeltaAndReadNoLabels) {
CellReader<int64_t> cell_reader("/tensorflow/monitoring/test/counter");
EXPECT_EQ(cell_reader.Delta(), 0);
EXPECT_EQ(cell_reader.Read(), 0);
test_counter->GetCell()->IncrementBy(5);
EXPECT_EQ(cell_reader.Delta(), 5);
EXPECT_EQ(cell_reader.Read(), 5);
test_counter->GetCell()->IncrementBy(10);
EXPECT_EQ(cell_reader.Delta(), 10);
EXPECT_EQ(cell_reader.Read(), 15);
test_counter->GetCell()->IncrementBy(100);
EXPECT_EQ(cell_reader.Delta(), 100);
EXPECT_EQ(cell_reader.Read(), 115);
}
TEST(CellReaderTest, CounterDeltaWithLabels) {
CellReader<int64_t> cell_reader(
"/tensorflow/monitoring/test/counter_with_labels");
EXPECT_EQ(cell_reader.Delta("x1", "y1"), 0);
EXPECT_EQ(cell_reader.Delta("x1", "y2"), 0);
EXPECT_EQ(cell_reader.Delta("x2", "y1"), 0);
EXPECT_EQ(cell_reader.Delta("x2", "y2"), 0);
test_counter_with_labels->GetCell("x1", "y1")->IncrementBy(1);
test_counter_with_labels->GetCell("x1", "y2")->IncrementBy(10);
test_counter_with_labels->GetCell("x2", "y1")->IncrementBy(100);
EXPECT_EQ(cell_reader.Delta("x1", "y1"), 1);
EXPECT_EQ(cell_reader.Delta("x1", "y2"), 10);
EXPECT_EQ(cell_reader.Delta("x2", "y1"), 100);
EXPECT_EQ(cell_reader.Delta("x2", "y2"), 0);
test_counter_with_labels->GetCell("x1", "y2")->IncrementBy(5);
test_counter_with_labels->GetCell("x2", "y1")->IncrementBy(50);
test_counter_with_labels->GetCell("x2", "y2")->IncrementBy(500);
EXPECT_EQ(cell_reader.Delta("x1", "y1"), 0);
EXPECT_EQ(cell_reader.Delta("x1", "y2"), 5);
EXPECT_EQ(cell_reader.Delta("x2", "y1"), 50);
EXPECT_EQ(cell_reader.Delta("x2", "y2"), 500);
test_counter_with_labels->GetCell("x1", "y1")->IncrementBy(1000);
test_counter_with_labels->GetCell("x2", "y2")->IncrementBy(1000);
EXPECT_EQ(cell_reader.Delta("x1", "y1"), 1000);
EXPECT_EQ(cell_reader.Delta("x1", "y2"), 0);
EXPECT_EQ(cell_reader.Delta("x2", "y1"), 0);
EXPECT_EQ(cell_reader.Delta("x2", "y2"), 1000);
}
TEST(CellReaderTest, CounterReadWithLabels) {
CellReader<int64_t> cell_reader(
"/tensorflow/monitoring/test/counter_with_labels");
EXPECT_EQ(cell_reader.Read("x1", "y1"), 0);
EXPECT_EQ(cell_reader.Read("x1", "y2"), 0);
EXPECT_EQ(cell_reader.Read("x2", "y1"), 0);
EXPECT_EQ(cell_reader.Read("x2", "y2"), 0);
test_counter_with_labels->GetCell("x1", "y1")->IncrementBy(1);
test_counter_with_labels->GetCell("x1", "y2")->IncrementBy(10);
test_counter_with_labels->GetCell("x2", "y1")->IncrementBy(100);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 1);
EXPECT_EQ(cell_reader.Read("x1", "y2"), 10);
EXPECT_EQ(cell_reader.Read("x2", "y1"), 100);
EXPECT_EQ(cell_reader.Read("x2", "y2"), 0);
test_counter_with_labels->GetCell("x1", "y2")->IncrementBy(5);
test_counter_with_labels->GetCell("x2", "y1")->IncrementBy(50);
test_counter_with_labels->GetCell("x2", "y2")->IncrementBy(500);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 1);
EXPECT_EQ(cell_reader.Read("x1", "y2"), 15);
EXPECT_EQ(cell_reader.Read("x2", "y1"), 150);
EXPECT_EQ(cell_reader.Read("x2", "y2"), 500);
test_counter_with_labels->GetCell("x1", "y1")->IncrementBy(1000);
test_counter_with_labels->GetCell("x2", "y2")->IncrementBy(1000);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 1001);
EXPECT_EQ(cell_reader.Read("x1", "y2"), 15);
EXPECT_EQ(cell_reader.Read("x2", "y1"), 150);
EXPECT_EQ(cell_reader.Read("x2", "y2"), 1500);
}
TEST(CellReaderTest, CounterDeltaAndReadWithLabels) {
CellReader<int64_t> cell_reader(
"/tensorflow/monitoring/test/counter_with_labels");
EXPECT_EQ(cell_reader.Delta("x1", "y1"), 0);
EXPECT_EQ(cell_reader.Delta("x1", "y2"), 0);
EXPECT_EQ(cell_reader.Delta("x2", "y1"), 0);
EXPECT_EQ(cell_reader.Delta("x2", "y2"), 0);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 0);
EXPECT_EQ(cell_reader.Read("x1", "y2"), 0);
EXPECT_EQ(cell_reader.Read("x2", "y1"), 0);
EXPECT_EQ(cell_reader.Read("x2", "y2"), 0);
test_counter_with_labels->GetCell("x1", "y1")->IncrementBy(1);
test_counter_with_labels->GetCell("x1", "y2")->IncrementBy(10);
test_counter_with_labels->GetCell("x2", "y1")->IncrementBy(100);
EXPECT_EQ(cell_reader.Delta("x1", "y1"), 1);
EXPECT_EQ(cell_reader.Delta("x1", "y2"), 10);
EXPECT_EQ(cell_reader.Delta("x2", "y1"), 100);
EXPECT_EQ(cell_reader.Delta("x2", "y2"), 0);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 1);
EXPECT_EQ(cell_reader.Read("x1", "y2"), 10);
EXPECT_EQ(cell_reader.Read("x2", "y1"), 100);
EXPECT_EQ(cell_reader.Read("x2", "y2"), 0);
test_counter_with_labels->GetCell("x1", "y2")->IncrementBy(5);
test_counter_with_labels->GetCell("x2", "y1")->IncrementBy(50);
test_counter_with_labels->GetCell("x2", "y2")->IncrementBy(500);
EXPECT_EQ(cell_reader.Delta("x1", "y1"), 0);
EXPECT_EQ(cell_reader.Delta("x1", "y2"), 5);
EXPECT_EQ(cell_reader.Delta("x2", "y1"), 50);
EXPECT_EQ(cell_reader.Delta("x2", "y2"), 500);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 1);
EXPECT_EQ(cell_reader.Read("x1", "y2"), 15);
EXPECT_EQ(cell_reader.Read("x2", "y1"), 150);
EXPECT_EQ(cell_reader.Read("x2", "y2"), 500);
test_counter_with_labels->GetCell("x1", "y1")->IncrementBy(1000);
test_counter_with_labels->GetCell("x2", "y2")->IncrementBy(1000);
EXPECT_EQ(cell_reader.Delta("x1", "y1"), 1000);
EXPECT_EQ(cell_reader.Delta("x1", "y2"), 0);
EXPECT_EQ(cell_reader.Delta("x2", "y1"), 0);
EXPECT_EQ(cell_reader.Delta("x2", "y2"), 1000);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 1001);
EXPECT_EQ(cell_reader.Read("x1", "y2"), 15);
EXPECT_EQ(cell_reader.Read("x2", "y1"), 150);
EXPECT_EQ(cell_reader.Read("x2", "y2"), 1500);
}
TEST(CellReaderTest, TwoCounterReaders) {
CellReader<int64_t> cell_reader("/tensorflow/monitoring/test/counter");
CellReader<int64_t> cell_reader_with_labels(
"/tensorflow/monitoring/test/counter_with_labels");
EXPECT_EQ(cell_reader.Delta(), 0);
EXPECT_EQ(cell_reader_with_labels.Delta("x1", "y1"), 0);
EXPECT_EQ(cell_reader_with_labels.Delta("x2", "y2"), 0);
EXPECT_EQ(cell_reader.Read(), 0);
EXPECT_EQ(cell_reader_with_labels.Read("x1", "y1"), 0);
EXPECT_EQ(cell_reader_with_labels.Read("x2", "y2"), 0);
test_counter->GetCell()->IncrementBy(1);
test_counter_with_labels->GetCell("x1", "y1")->IncrementBy(100);
EXPECT_EQ(cell_reader.Delta(), 1);
EXPECT_EQ(cell_reader_with_labels.Delta("x1", "y1"), 100);
EXPECT_EQ(cell_reader_with_labels.Delta("x2", "y2"), 0);
EXPECT_EQ(cell_reader.Read(), 1);
EXPECT_EQ(cell_reader_with_labels.Read("x1", "y1"), 100);
EXPECT_EQ(cell_reader_with_labels.Read("x2", "y2"), 0);
test_counter->GetCell()->IncrementBy(5);
test_counter_with_labels->GetCell("x2", "y2")->IncrementBy(500);
EXPECT_EQ(cell_reader.Delta(), 5);
EXPECT_EQ(cell_reader_with_labels.Delta("x1", "y1"), 0);
EXPECT_EQ(cell_reader_with_labels.Delta("x2", "y2"), 500);
EXPECT_EQ(cell_reader.Read(), 6);
EXPECT_EQ(cell_reader_with_labels.Read("x1", "y1"), 100);
EXPECT_EQ(cell_reader_with_labels.Read("x2", "y2"), 500);
test_counter->GetCell()->IncrementBy(1);
test_counter_with_labels->GetCell("x1", "y1")->IncrementBy(1);
test_counter_with_labels->GetCell("x2", "y2")->IncrementBy(1);
EXPECT_EQ(cell_reader.Delta(), 1);
EXPECT_EQ(cell_reader_with_labels.Delta("x1", "y1"), 1);
EXPECT_EQ(cell_reader_with_labels.Delta("x2", "y2"), 1);
EXPECT_EQ(cell_reader.Read(), 7);
EXPECT_EQ(cell_reader_with_labels.Read("x1", "y1"), 101);
EXPECT_EQ(cell_reader_with_labels.Read("x2", "y2"), 501);
}
TEST(CellReaderTest, RepeatedReads) {
CellReader<int64_t> cell_reader("/tensorflow/monitoring/test/counter");
CellReader<int64_t> cell_reader_with_labels(
"/tensorflow/monitoring/test/counter_with_labels");
EXPECT_EQ(cell_reader.Delta(), 0);
EXPECT_EQ(cell_reader_with_labels.Delta("x1", "y1"), 0);
EXPECT_EQ(cell_reader_with_labels.Delta("x2", "y2"), 0);
EXPECT_EQ(cell_reader.Read(), 0);
EXPECT_EQ(cell_reader_with_labels.Read("x1", "y1"), 0);
EXPECT_EQ(cell_reader_with_labels.Read("x2", "y2"), 0);
test_counter->GetCell()->IncrementBy(1);
test_counter_with_labels->GetCell("x1", "y1")->IncrementBy(100);
EXPECT_EQ(cell_reader.Delta(), 1);
EXPECT_EQ(cell_reader_with_labels.Delta("x1", "y1"), 100);
EXPECT_EQ(cell_reader_with_labels.Delta("x2", "y2"), 0);
EXPECT_EQ(cell_reader.Read(), 1);
EXPECT_EQ(cell_reader_with_labels.Read("x1", "y1"), 100);
EXPECT_EQ(cell_reader_with_labels.Read("x2", "y2"), 0);
EXPECT_EQ(cell_reader.Delta(), 0);
EXPECT_EQ(cell_reader_with_labels.Delta("x1", "y1"), 0);
EXPECT_EQ(cell_reader_with_labels.Delta("x2", "y2"), 0);
EXPECT_EQ(cell_reader.Read(), 1);
EXPECT_EQ(cell_reader_with_labels.Read("x1", "y1"), 100);
EXPECT_EQ(cell_reader_with_labels.Read("x2", "y2"), 0);
}
TEST(CellReaderTest, SamplerDeltaNoLabels) {
CellReader<Histogram> cell_reader("/tensorflow/monitoring/test/sampler");
Histogram histogram = cell_reader.Delta();
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 0.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 0.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
test_sampler->GetCell()->Add(0.1);
histogram = cell_reader.Delta();
EXPECT_FLOAT_EQ(histogram.num(), 1.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.1);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.01);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 1.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 0.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 0.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
test_sampler->GetCell()->Add(1.1);
histogram = cell_reader.Delta();
EXPECT_FLOAT_EQ(histogram.num(), 1.0);
EXPECT_FLOAT_EQ(histogram.sum(), 1.1);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 1.21);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 1.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 0.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
test_sampler->GetCell()->Add(100);
histogram = cell_reader.Delta();
EXPECT_FLOAT_EQ(histogram.num(), 1.0);
EXPECT_FLOAT_EQ(histogram.sum(), 100);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 10000);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 0.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 1.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
}
TEST(CellReaderTest, SamplerReadNoLabels) {
CellReader<Histogram> cell_reader("/tensorflow/monitoring/test/sampler");
Histogram histogram = cell_reader.Read();
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 0.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 0.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
test_sampler->GetCell()->Add(0.1);
histogram = cell_reader.Read();
EXPECT_FLOAT_EQ(histogram.num(), 1.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.1);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.01);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 1.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 0.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 0.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
test_sampler->GetCell()->Add(1.1);
histogram = cell_reader.Read();
EXPECT_FLOAT_EQ(histogram.num(), 2.0);
EXPECT_FLOAT_EQ(histogram.sum(), 1.2);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 1.22);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 1.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 1.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 0.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
test_sampler->GetCell()->Add(100);
histogram = cell_reader.Read();
EXPECT_FLOAT_EQ(histogram.num(), 3.0);
EXPECT_FLOAT_EQ(histogram.sum(), 101.2);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 10001.22);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 1.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 1.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 1.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
}
TEST(CellReaderTest, SamplerDeltaAndReadNoLabels) {
CellReader<Histogram> cell_reader("/tensorflow/monitoring/test/sampler");
Histogram histogram = cell_reader.Delta();
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 0.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 0.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
histogram = cell_reader.Read();
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 0.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 0.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
test_sampler->GetCell()->Add(0.1);
test_sampler->GetCell()->Add(0.1);
histogram = cell_reader.Delta();
EXPECT_FLOAT_EQ(histogram.num(), 2.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.2);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.02);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 2.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 0.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 0.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
histogram = cell_reader.Read();
EXPECT_FLOAT_EQ(histogram.num(), 2.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.2);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.02);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 2.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 0.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 0.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
test_sampler->GetCell()->Add(100);
test_sampler->GetCell()->Add(100);
histogram = cell_reader.Delta();
EXPECT_FLOAT_EQ(histogram.num(), 2.0);
EXPECT_FLOAT_EQ(histogram.sum(), 200);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 20000);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 0.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 2.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
histogram = cell_reader.Read();
EXPECT_FLOAT_EQ(histogram.num(), 4.0);
EXPECT_FLOAT_EQ(histogram.sum(), 200.2);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 20000.02);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 2.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
EXPECT_FLOAT_EQ(histogram.num(6), 0.0);
EXPECT_FLOAT_EQ(histogram.num(7), 0.0);
EXPECT_FLOAT_EQ(histogram.num(8), 0.0);
EXPECT_FLOAT_EQ(histogram.num(9), 0.0);
EXPECT_FLOAT_EQ(histogram.num(10), 0.0);
EXPECT_FLOAT_EQ(histogram.num(11), 2.0);
EXPECT_FLOAT_EQ(histogram.num(12), 0.0);
}
TEST(CellReaderTest, SamplerDeltaWithLabels) {
CellReader<Histogram> cell_reader(
"/tensorflow/monitoring/test/sampler_with_labels");
Histogram histogram = cell_reader.Delta("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Delta("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
test_sampler_with_labels->GetCell("x1", "y1")->Add(-100);
test_sampler_with_labels->GetCell("x1", "y1")->Add(-100);
histogram = cell_reader.Delta("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 2.0);
EXPECT_FLOAT_EQ(histogram.sum(), -200.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 20000.0);
EXPECT_FLOAT_EQ(histogram.num(0), 2.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Delta("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
test_sampler_with_labels->GetCell("x2", "y2")->Add(100);
test_sampler_with_labels->GetCell("x2", "y2")->Add(100);
histogram = cell_reader.Delta("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Delta("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 2.0);
EXPECT_FLOAT_EQ(histogram.sum(), 200.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 20000.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 2.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
test_sampler_with_labels->GetCell("x1", "y1")->Add(-100000000);
test_sampler_with_labels->GetCell("x2", "y2")->Add(100000000);
histogram = cell_reader.Delta("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 1.0);
EXPECT_FLOAT_EQ(histogram.sum(), -100000000);
EXPECT_FLOAT_EQ(histogram.num(0), 1.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Delta("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 1.0);
EXPECT_FLOAT_EQ(histogram.sum(), 100000000);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 1.0);
}
TEST(CellReaderTest, SamplerReadWithLabels) {
CellReader<Histogram> cell_reader(
"/tensorflow/monitoring/test/sampler_with_labels");
Histogram histogram = cell_reader.Read("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Read("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
test_sampler_with_labels->GetCell("x1", "y1")->Add(-100);
test_sampler_with_labels->GetCell("x1", "y1")->Add(-100);
histogram = cell_reader.Read("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 2.0);
EXPECT_FLOAT_EQ(histogram.sum(), -200.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 20000.0);
EXPECT_FLOAT_EQ(histogram.num(0), 2.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Read("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
test_sampler_with_labels->GetCell("x2", "y2")->Add(100);
test_sampler_with_labels->GetCell("x2", "y2")->Add(100);
histogram = cell_reader.Read("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 2.0);
EXPECT_FLOAT_EQ(histogram.sum(), -200.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 20000.0);
EXPECT_FLOAT_EQ(histogram.num(0), 2.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Read("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 2.0);
EXPECT_FLOAT_EQ(histogram.sum(), 200.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 20000.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 2.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
test_sampler_with_labels->GetCell("x1", "y1")->Add(-100000000);
test_sampler_with_labels->GetCell("x2", "y2")->Add(100000000);
histogram = cell_reader.Read("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 3.0);
EXPECT_FLOAT_EQ(histogram.sum(), -100000200);
EXPECT_FLOAT_EQ(histogram.num(0), 3.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Read("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 3.0);
EXPECT_FLOAT_EQ(histogram.sum(), 100000200);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 2.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 1.0);
}
TEST(CellReaderTest, SamplerRepeatedReads) {
CellReader<Histogram> cell_reader(
"/tensorflow/monitoring/test/sampler_with_labels");
Histogram histogram = cell_reader.Read("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Read("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
test_sampler_with_labels->GetCell("x1", "y1")->Add(-100);
test_sampler_with_labels->GetCell("x1", "y1")->Add(-100);
test_sampler_with_labels->GetCell("x2", "y2")->Add(100);
test_sampler_with_labels->GetCell("x2", "y2")->Add(100);
test_sampler_with_labels->GetCell("x1", "y1")->Add(-100000000);
test_sampler_with_labels->GetCell("x2", "y2")->Add(100000000);
histogram = cell_reader.Delta("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 3.0);
EXPECT_FLOAT_EQ(histogram.sum(), -100000200);
EXPECT_FLOAT_EQ(histogram.num(0), 3.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Delta("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 3.0);
EXPECT_FLOAT_EQ(histogram.sum(), 100000200);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 2.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 1.0);
histogram = cell_reader.Read("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 3.0);
EXPECT_FLOAT_EQ(histogram.sum(), -100000200);
EXPECT_FLOAT_EQ(histogram.num(0), 3.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Read("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 3.0);
EXPECT_FLOAT_EQ(histogram.sum(), 100000200);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 2.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 1.0);
histogram = cell_reader.Delta("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Delta("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 0.0);
EXPECT_FLOAT_EQ(histogram.sum(), 0.0);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Read("x1", "y1");
EXPECT_FLOAT_EQ(histogram.num(), 3.0);
EXPECT_FLOAT_EQ(histogram.sum(), -100000200);
EXPECT_FLOAT_EQ(histogram.num(0), 3.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 0.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 0.0);
histogram = cell_reader.Read("x2", "y2");
EXPECT_FLOAT_EQ(histogram.num(), 3.0);
EXPECT_FLOAT_EQ(histogram.sum(), 100000200);
EXPECT_FLOAT_EQ(histogram.num(0), 0.0);
EXPECT_FLOAT_EQ(histogram.num(1), 0.0);
EXPECT_FLOAT_EQ(histogram.num(2), 0.0);
EXPECT_FLOAT_EQ(histogram.num(3), 2.0);
EXPECT_FLOAT_EQ(histogram.num(4), 0.0);
EXPECT_FLOAT_EQ(histogram.num(5), 1.0);
}
TEST(CellReaderTest, IntGaugeRead) {
CellReader<int64_t> cell_reader("/tensorflow/monitoring/test/int_gauge");
EXPECT_EQ(cell_reader.Read(), 0);
test_int_gauge->GetCell()->Set(100);
EXPECT_EQ(cell_reader.Read(), 100);
test_int_gauge->GetCell()->Set(-100);
EXPECT_EQ(cell_reader.Read(), -100);
test_int_gauge->GetCell()->Set(0);
EXPECT_EQ(cell_reader.Read(), 0);
}
TEST(CellReaderTest, IntGaugeReadWithLabels) {
CellReader<int64_t> cell_reader(
"/tensorflow/monitoring/test/int_gauge_with_labels");
EXPECT_EQ(cell_reader.Read("x1", "y1"), 0);
EXPECT_EQ(cell_reader.Read("x2", "y2"), 0);
test_int_gauge_with_labels->GetCell("x1", "y1")->Set(100000);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 100000);
EXPECT_EQ(cell_reader.Read("x2", "y2"), 0);
test_int_gauge_with_labels->GetCell("x2", "y2")->Set(-100000);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 100000);
EXPECT_EQ(cell_reader.Read("x2", "y2"), -100000);
test_int_gauge_with_labels->GetCell("x1", "y1")->Set(-100000);
test_int_gauge_with_labels->GetCell("x2", "y2")->Set(100000);
EXPECT_EQ(cell_reader.Read("x1", "y1"), -100000);
EXPECT_EQ(cell_reader.Read("x2", "y2"), 100000);
test_int_gauge_with_labels->GetCell("x1", "y1")->Set(0);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 0);
EXPECT_EQ(cell_reader.Read("x2", "y2"), 100000);
}
TEST(CellReaderTest, IntGaugeRepeatedSetAndRead) {
CellReader<int64_t> cell_reader(
"/tensorflow/monitoring/test/int_gauge_with_labels");
test_int_gauge_with_labels->GetCell("x1", "y1")->Set(-1);
test_int_gauge_with_labels->GetCell("x2", "y2")->Set(1);
test_int_gauge_with_labels->GetCell("x1", "y1")->Set(1);
test_int_gauge_with_labels->GetCell("x2", "y2")->Set(-1);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 1);
EXPECT_EQ(cell_reader.Read("x2", "y2"), -1);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 1);
EXPECT_EQ(cell_reader.Read("x2", "y2"), -1);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 1);
EXPECT_EQ(cell_reader.Read("x2", "y2"), -1);
test_int_gauge_with_labels->GetCell("x1", "y1")->Set(0);
test_int_gauge_with_labels->GetCell("x2", "y2")->Set(500);
test_int_gauge_with_labels->GetCell("x1", "y1")->Set(0);
test_int_gauge_with_labels->GetCell("x2", "y2")->Set(-500);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 0);
EXPECT_EQ(cell_reader.Read("x2", "y2"), -500);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 0);
EXPECT_EQ(cell_reader.Read("x2", "y2"), -500);
EXPECT_EQ(cell_reader.Read("x1", "y1"), 0);
EXPECT_EQ(cell_reader.Read("x2", "y2"), -500);
}
TEST(CellReaderTest, StringGaugeRead) {
CellReader<std::string> cell_reader(
"/tensorflow/monitoring/test/string_gauge");
EXPECT_EQ(cell_reader.Read(), "");
test_string_gauge->GetCell()->Set("gauge value");
EXPECT_EQ(cell_reader.Read(), "gauge value");
test_string_gauge->GetCell()->Set("Updated gauge value");
EXPECT_EQ(cell_reader.Read(), "Updated gauge value");
test_string_gauge->GetCell()->Set("");
EXPECT_EQ(cell_reader.Read(), "");
}
TEST(CellReaderTest, StringGaugeReadWithLabels) {
CellReader<std::string> cell_reader(
"/tensorflow/monitoring/test/string_gauge_with_labels");
EXPECT_EQ(cell_reader.Read("x1", "y1"), "");
EXPECT_EQ(cell_reader.Read("x2", "y2"), "");
test_string_gauge_with_labels->GetCell("x1", "y1")->Set("Value 1");
EXPECT_EQ(cell_reader.Read("x1", "y1"), "Value 1");
EXPECT_EQ(cell_reader.Read("x2", "y2"), "");
test_string_gauge_with_labels->GetCell("x2", "y2")->Set("Value 2");
EXPECT_EQ(cell_reader.Read("x1", "y1"), "Value 1");
EXPECT_EQ(cell_reader.Read("x2", "y2"), "Value 2");
test_string_gauge_with_labels->GetCell("x1", "y1")->Set("Value 3");
test_string_gauge_with_labels->GetCell("x2", "y2")->Set("Value 3");
EXPECT_EQ(cell_reader.Read("x1", "y1"), "Value 3");
EXPECT_EQ(cell_reader.Read("x2", "y2"), "Value 3");
test_string_gauge_with_labels->GetCell("x1", "y1")->Set("");
test_string_gauge_with_labels->GetCell("x2", "y2")->Set("");
EXPECT_EQ(cell_reader.Read("x1", "y1"), "");
EXPECT_EQ(cell_reader.Read("x2", "y2"), "");
}
TEST(CellReaderTest, StringGaugeRepeatedSetAndRead) {
CellReader<std::string> cell_reader(
"/tensorflow/monitoring/test/string_gauge_with_labels");
EXPECT_EQ(cell_reader.Read("x1", "y1"), "");
EXPECT_EQ(cell_reader.Read("x2", "y2"), "");
test_string_gauge_with_labels->GetCell("x1", "y1")->Set("Value 1");
test_string_gauge_with_labels->GetCell("x2", "y2")->Set("Value 2");
test_string_gauge_with_labels->GetCell("x1", "y1")->Set("Value 3");
test_string_gauge_with_labels->GetCell("x2", "y2")->Set("Value 3");
EXPECT_EQ(cell_reader.Read("x1", "y1"), "Value 3");
EXPECT_EQ(cell_reader.Read("x2", "y2"), "Value 3");
EXPECT_EQ(cell_reader.Read("x1", "y1"), "Value 3");
EXPECT_EQ(cell_reader.Read("x2", "y2"), "Value 3");
EXPECT_EQ(cell_reader.Read("x1", "y1"), "Value 3");
EXPECT_EQ(cell_reader.Read("x2", "y2"), "Value 3");
test_string_gauge_with_labels->GetCell("x1", "y1")->Set("");
test_string_gauge_with_labels->GetCell("x2", "y2")->Set("");
test_string_gauge_with_labels->GetCell("x1", "y1")->Set("-10");
test_string_gauge_with_labels->GetCell("x2", "y2")->Set("-10");
EXPECT_EQ(cell_reader.Read("x1", "y1"), "-10");
EXPECT_EQ(cell_reader.Read("x2", "y2"), "-10");
EXPECT_EQ(cell_reader.Read("x1", "y1"), "-10");
EXPECT_EQ(cell_reader.Read("x2", "y2"), "-10");
EXPECT_EQ(cell_reader.Read("x1", "y1"), "-10");
EXPECT_EQ(cell_reader.Read("x2", "y2"), "-10");
}
TEST(CellReaderTest, BoolGaugeRead) {
CellReader<bool> cell_reader("/tensorflow/monitoring/test/bool_gauge");
EXPECT_EQ(cell_reader.Read(), false);
test_bool_gauge->GetCell()->Set(true);
EXPECT_EQ(cell_reader.Read(), true);
test_bool_gauge->GetCell()->Set(false);
EXPECT_EQ(cell_reader.Read(), false);
}
TEST(CellReaderTest, BoolGaugeReadWithLabels) {
CellReader<bool> cell_reader(
"/tensorflow/monitoring/test/bool_gauge_with_labels");
EXPECT_EQ(cell_reader.Read("x1", "y1"), false);
EXPECT_EQ(cell_reader.Read("x2", "y2"), false);
test_bool_gauge_with_labels->GetCell("x1", "y1")->Set(true);
EXPECT_EQ(cell_reader.Read("x1", "y1"), true);
EXPECT_EQ(cell_reader.Read("x2", "y2"), false);
test_bool_gauge_with_labels->GetCell("x2", "y2")->Set(true);
EXPECT_EQ(cell_reader.Read("x1", "y1"), true);
EXPECT_EQ(cell_reader.Read("x2", "y2"), true);
test_bool_gauge_with_labels->GetCell("x1", "y1")->Set(false);
test_bool_gauge_with_labels->GetCell("x2", "y2")->Set(true);
EXPECT_EQ(cell_reader.Read("x1", "y1"), false);
EXPECT_EQ(cell_reader.Read("x2", "y2"), true);
test_bool_gauge_with_labels->GetCell("x1", "y1")->Set(false);
test_bool_gauge_with_labels->GetCell("x2", "y2")->Set(false);
EXPECT_EQ(cell_reader.Read("x1", "y1"), false);
EXPECT_EQ(cell_reader.Read("x2", "y2"), false);
}
TEST(CellReaderTest, BoolGaugeRepeatedSetAndRead) {
CellReader<bool> cell_reader(
"/tensorflow/monitoring/test/bool_gauge_with_labels");
EXPECT_EQ(cell_reader.Read("x1", "y1"), false);
EXPECT_EQ(cell_reader.Read("x2", "y2"), false);
test_bool_gauge_with_labels->GetCell("x1", "y1")->Set(true);
test_bool_gauge_with_labels->GetCell("x2", "y2")->Set(false);
test_bool_gauge_with_labels->GetCell("x1", "y1")->Set(true);
test_bool_gauge_with_labels->GetCell("x2", "y2")->Set(true);
EXPECT_EQ(cell_reader.Read("x1", "y1"), true);
EXPECT_EQ(cell_reader.Read("x2", "y2"), true);
EXPECT_EQ(cell_reader.Read("x1", "y1"), true);
EXPECT_EQ(cell_reader.Read("x2", "y2"), true);
EXPECT_EQ(cell_reader.Read("x1", "y1"), true);
EXPECT_EQ(cell_reader.Read("x2", "y2"), true);
test_bool_gauge_with_labels->GetCell("x1", "y1")->Set(false);
test_bool_gauge_with_labels->GetCell("x2", "y2")->Set(true);
test_bool_gauge_with_labels->GetCell("x1", "y1")->Set(false);
test_bool_gauge_with_labels->GetCell("x2", "y2")->Set(true);
EXPECT_EQ(cell_reader.Read("x1", "y1"), false);
EXPECT_EQ(cell_reader.Read("x2", "y2"), true);
EXPECT_EQ(cell_reader.Read("x1", "y1"), false);
EXPECT_EQ(cell_reader.Read("x2", "y2"), true);
EXPECT_EQ(cell_reader.Read("x1", "y1"), false);
EXPECT_EQ(cell_reader.Read("x2", "y2"), true);
}
TEST(CellReaderTest, PercentilesDeltaNoLabels) {
CellReader<Percentiles> cell_reader(
"/tensorflow/monitoring/test/percentiles");
Percentiles percentiles = cell_reader.Delta();
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0.0);
test_percentiles->GetCell()->Add(1.0);
percentiles = cell_reader.Delta();
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), 1.0);
test_percentiles->GetCell()->Add(-10.0);
percentiles = cell_reader.Delta();
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), -10.0);
test_percentiles->GetCell()->Add(1000.0);
percentiles = cell_reader.Delta();
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), 1000.0);
}
TEST(CellReaderTest, PercentilesReadNoLabels) {
CellReader<Percentiles> cell_reader(
"/tensorflow/monitoring/test/percentiles");
Percentiles percentiles = cell_reader.Read();
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0.0);
test_percentiles->GetCell()->Add(1.0);
percentiles = cell_reader.Read();
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), 1.0);
test_percentiles->GetCell()->Add(-10.0);
percentiles = cell_reader.Read();
EXPECT_EQ(percentiles.num(), 2);
EXPECT_FLOAT_EQ(percentiles.sum(), -9.0);
test_percentiles->GetCell()->Add(1000.0);
percentiles = cell_reader.Read();
EXPECT_EQ(percentiles.num(), 3);
EXPECT_FLOAT_EQ(percentiles.sum(), 991.0);
}
TEST(CellReaderTest, PercentilesWithLabels) {
CellReader<Percentiles> cell_reader(
"/tensorflow/monitoring/test/percentiles_with_labels");
Percentiles percentiles = cell_reader.Delta("x1", "y1");
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0.0);
percentiles = cell_reader.Delta("x2", "y2");
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0.0);
percentiles = cell_reader.Read("x1", "y1");
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0.0);
percentiles = cell_reader.Read("x2", "y2");
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0.0);
test_percentiles_with_labels->GetCell("x1", "y1")->Add(-1.0);
percentiles = cell_reader.Delta("x1", "y1");
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), -1.0);
percentiles = cell_reader.Delta("x2", "y2");
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0.0);
percentiles = cell_reader.Read("x1", "y1");
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), -1.0);
percentiles = cell_reader.Read("x2", "y2");
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0.0);
test_percentiles_with_labels->GetCell("x2", "y2")->Add(1.0);
percentiles = cell_reader.Delta("x1", "y1");
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0.0);
percentiles = cell_reader.Delta("x2", "y2");
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), 1.0);
percentiles = cell_reader.Read("x1", "y1");
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), -1.0);
percentiles = cell_reader.Read("x2", "y2");
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), 1.0);
test_percentiles_with_labels->GetCell("x1", "y1")->Add(100.0);
test_percentiles_with_labels->GetCell("x2", "y2")->Add(-100.0);
percentiles = cell_reader.Delta("x1", "y1");
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), 100.0);
percentiles = cell_reader.Delta("x2", "y2");
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), -100.0);
percentiles = cell_reader.Read("x1", "y1");
EXPECT_EQ(percentiles.num(), 2);
EXPECT_FLOAT_EQ(percentiles.sum(), 99.0);
percentiles = cell_reader.Read("x2", "y2");
EXPECT_EQ(percentiles.num(), 2);
EXPECT_FLOAT_EQ(percentiles.sum(), -99.0);
}
TEST(CellReaderTest, PercentilesRepeatedSetAndRead) {
CellReader<Percentiles> cell_reader(
"/tensorflow/monitoring/test/percentiles_with_labels");
Percentiles percentiles = cell_reader.Delta("x1", "y1");
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0.0);
percentiles = cell_reader.Delta("x1", "y1");
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0.0);
test_percentiles_with_labels->GetCell("x1", "y1")->Add(1.0);
test_percentiles_with_labels->GetCell("x2", "y2")->Add(-1.0);
test_percentiles_with_labels->GetCell("x1", "y1")->Add(10.0);
test_percentiles_with_labels->GetCell("x2", "y2")->Add(-10.0);
test_percentiles_with_labels->GetCell("x1", "y1")->Add(100.0);
test_percentiles_with_labels->GetCell("x2", "y2")->Add(-100.0);
percentiles = cell_reader.Delta("x1", "y1");
EXPECT_EQ(percentiles.num(), 3);
EXPECT_FLOAT_EQ(percentiles.sum(), 111.0);
percentiles = cell_reader.Read("x1", "y1");
EXPECT_EQ(percentiles.num(), 3);
EXPECT_FLOAT_EQ(percentiles.sum(), 111.0);
percentiles = cell_reader.Delta("x1", "y1");
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0);
percentiles = cell_reader.Read("x1", "y1");
EXPECT_EQ(percentiles.num(), 3);
EXPECT_FLOAT_EQ(percentiles.sum(), 111.0);
percentiles = cell_reader.Delta("x2", "y2");
EXPECT_EQ(percentiles.num(), 3);
EXPECT_FLOAT_EQ(percentiles.sum(), -111.0);
percentiles = cell_reader.Read("x2", "y2");
EXPECT_EQ(percentiles.num(), 3);
EXPECT_FLOAT_EQ(percentiles.sum(), -111.0);
percentiles = cell_reader.Delta("x2", "y2");
EXPECT_EQ(percentiles.num(), 0);
EXPECT_FLOAT_EQ(percentiles.sum(), 0);
percentiles = cell_reader.Read("x2", "y2");
EXPECT_EQ(percentiles.num(), 3);
EXPECT_FLOAT_EQ(percentiles.sum(), -111.0);
}
#if GTEST_HAS_DEATH_TEST
TEST(CellReaderTest, WrongNumberOfLabels) {
CellReader<int64_t> cell_reader("/tensorflow/monitoring/test/counter");
EXPECT_EQ(cell_reader.Read(), 0);
EXPECT_DEATH(cell_reader.Read("label1"), "has 0 labels");
EXPECT_DEATH(cell_reader.Read("label1", "label2"), "has 0 labels");
EXPECT_DEATH(cell_reader.Read("label1", "label2", "label3"), "has 0 labels");
CellReader<int64_t> cell_reader_with_labels(
"/tensorflow/monitoring/test/counter_with_labels");
EXPECT_DEATH(cell_reader_with_labels.Read(), "has 2 labels");
EXPECT_DEATH(cell_reader_with_labels.Read("label1"), "has 2 labels");
EXPECT_EQ(cell_reader_with_labels.Read("label1", "label2"), 0);
EXPECT_DEATH(cell_reader_with_labels.Read("label1", "label2", "label3"),
"has 2 labels");
}
TEST(CellReaderTest, MetricIsNotFound) {
CellReader<int64_t> cell_reader("/metric/does/not/exist");
CellReader<int64_t> empty_cell_reader("");
EXPECT_DEATH(cell_reader.Read(), "Metric descriptor is not found");
EXPECT_DEATH(empty_cell_reader.Read(), "Metric descriptor is not found");
}
TEST(CellReaderTest, StringGaugeDelta) {
CellReader<std::string> cell_reader(
"/tensorflow/monitoring/test/string_gauge");
CellReader<std::string> cell_reader_with_labels(
"/tensorflow/monitoring/test/string_gauge_with_labels");
EXPECT_DEATH(cell_reader.Delta(), "Please use `Read` instead.");
EXPECT_DEATH(cell_reader_with_labels.Delta("x", "y"),
"Please use `Read` instead.");
}
TEST(CellReaderTest, BoolGaugeDelta) {
CellReader<bool> cell_reader("/tensorflow/monitoring/test/bool_gauge");
CellReader<bool> cell_reader_with_labels(
"/tensorflow/monitoring/test/bool_gauge_with_labels");
EXPECT_DEATH(cell_reader.Delta(), "Please use `Read` instead.");
EXPECT_DEATH(cell_reader_with_labels.Delta("x", "y"),
"Please use `Read` instead.");
}
TEST(CellReaderTest, InvalidType) {
CellReader<std::vector<int>> cell_reader(
"/tensorflow/monitoring/test/counter");
CellReader<std::vector<int>> cell_reader_with_labels(
"/tensorflow/monitoring/test/counter_with_labels");
EXPECT_DEATH(cell_reader.Read(),
"Tensorflow CellReader does not support type");
EXPECT_DEATH(cell_reader_with_labels.Delta("x", "y"),
"Tensorflow CellReader does not support type");
test_counter->GetCell()->IncrementBy(1);
test_counter_with_labels->GetCell("x", "y")->IncrementBy(1);
EXPECT_DEATH(cell_reader.Read(),
"Tensorflow CellReader does not support type");
EXPECT_DEATH(cell_reader_with_labels.Delta("x", "y"),
"Tensorflow CellReader does not support type");
}
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/cell_reader.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/cell_reader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2d2db3f4-da87-49d6-a8e3-2b09a3889fe9 | cpp | tensorflow/tensorflow | counter | tensorflow/core/lib/monitoring/counter.h | tensorflow/core/lib/monitoring/counter_test.cc | #ifndef TENSORFLOW_CORE_LIB_MONITORING_COUNTER_H_
#define TENSORFLOW_CORE_LIB_MONITORING_COUNTER_H_
#include "xla/tsl/lib/monitoring/counter.h"
#ifdef IS_MOBILE_PLATFORM
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#else
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/monitoring/collection_registry.h"
#include "tensorflow/core/lib/monitoring/metric_def.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#endif
namespace tensorflow {
namespace monitoring {
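// Forwarding aliases: the Counter implementation lives in tsl::monitoring.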
using tsl::monitoring::Counter;
using tsl::monitoring::CounterCell;
}
}
#endif | #include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace monitoring {
namespace {
auto* counter_with_labels =
Counter<1>::New("/tensorflow/test/counter_with_labels",
"Counter with one label.", "MyLabel");
TEST(LabeledCounterTest, InitializedWithZero) {
EXPECT_EQ(0, counter_with_labels->GetCell("Empty")->value());
}
TEST(LabeledCounterTest, GetCell) {
auto* cell = counter_with_labels->GetCell("GetCellOp");
EXPECT_EQ(0, cell->value());
cell->IncrementBy(42);
EXPECT_EQ(42, cell->value());
auto* same_cell = counter_with_labels->GetCell("GetCellOp");
EXPECT_EQ(42, same_cell->value());
same_cell->IncrementBy(58);
EXPECT_EQ(100, cell->value());
EXPECT_EQ(100, same_cell->value());
}
TEST(LabeledCounterDeathTest, DiesOnDecrement) {
EXPECT_DEBUG_DEATH(
{ counter_with_labels->GetCell("DyingOp")->IncrementBy(-1); },
"decrement");
}
auto* init_counter_without_labels = Counter<0>::New(
"/tensorflow/test/init_counter_without_labels",
"Counter without any labels to check if it is initialized as 0.");
TEST(UnlabeledCounterTest, InitializedWithZero) {
EXPECT_EQ(0, init_counter_without_labels->GetCell()->value());
}
auto* counter_without_labels = Counter<0>::New(
"/tensorflow/test/counter_without_labels", "Counter without any labels.");
TEST(UnlabeledCounterTest, GetCell) {
auto* cell = counter_without_labels->GetCell();
EXPECT_EQ(0, cell->value());
cell->IncrementBy(42);
EXPECT_EQ(42, cell->value());
auto* same_cell = counter_without_labels->GetCell();
EXPECT_EQ(42, same_cell->value());
same_cell->IncrementBy(58);
EXPECT_EQ(100, cell->value());
EXPECT_EQ(100, same_cell->value());
}
auto* dead_counter_without_labels = Counter<0>::New(
"/tensorflow/test/dead_counter_without_labels",
"Counter without any labels which goes on to die on decrement.");
TEST(UnlabeledCounterDeathTest, DiesOnDecrement) {
EXPECT_DEBUG_DEATH(
{ dead_counter_without_labels->GetCell()->IncrementBy(-1); },
"decrement");
}
TEST(LabeledCounterTest, SameName) {
auto* same_counter = Counter<1>::New("/tensorflow/test/counter_with_labels",
"Counter with one label.", "MyLabel");
EXPECT_TRUE(counter_with_labels->GetStatus().ok());
EXPECT_TRUE(same_counter->GetStatus().ok());
delete same_counter;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/counter.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/counter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3b4a14ae-fa50-425d-8ad4-aec55977753d | cpp | tensorflow/tensorflow | metric_def | tensorflow/core/lib/monitoring/metric_def.h | tensorflow/core/lib/monitoring/metric_def_test.cc | #ifndef TENSORFLOW_CORE_LIB_MONITORING_METRIC_DEF_H_
#define TENSORFLOW_CORE_LIB_MONITORING_METRIC_DEF_H_
#include <array>
#include <functional>
#include <string>
#include <vector>
#include "xla/tsl/lib/monitoring/metric_def.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/monitoring/types.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace monitoring {
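// Forwarding aliases: MetricDef and related types are implemented in
// tsl::monitoring.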
using tsl::monitoring::MetricDef;
using tsl::monitoring::MetricKind;
using tsl::monitoring::ValueType;
}
}
#endif | #include "tensorflow/core/lib/monitoring/metric_def.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace monitoring {
namespace {
TEST(MetricDefTest, Simple) {
const MetricDef<MetricKind::kCumulative, int64_t, 0> metric_def0(
"/tensorflow/metric0", "An example metric with no labels.");
const MetricDef<MetricKind::kGauge, HistogramProto, 1> metric_def1(
"/tensorflow/metric1", "An example metric with one label.", "LabelName");
EXPECT_EQ("/tensorflow/metric0", metric_def0.name());
EXPECT_EQ("/tensorflow/metric1", metric_def1.name());
EXPECT_EQ(MetricKind::kCumulative, metric_def0.kind());
EXPECT_EQ(MetricKind::kGauge, metric_def1.kind());
EXPECT_EQ("An example metric with no labels.", metric_def0.description());
EXPECT_EQ("An example metric with one label.", metric_def1.description());
EXPECT_EQ(0, metric_def0.label_descriptions().size());
ASSERT_EQ(1, metric_def1.label_descriptions().size());
EXPECT_EQ("LabelName", metric_def1.label_descriptions()[0]);
}
TEST(MetricDefTest, StringsPersist) {
string name = "/tensorflow/metric0";
string description = "test description";
string label_description = "test label description";
const MetricDef<MetricKind::kCumulative, int64_t, 1> metric_def(
name, description, label_description);
name[4] = 'A';
description[4] = 'B';
label_description[4] = 'C';
EXPECT_NE(name, metric_def.name());
EXPECT_NE(description, metric_def.description());
EXPECT_NE(label_description, metric_def.label_descriptions()[0]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/metric_def.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/metric_def_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
10fac47b-79d0-4e11-8153-c2ff1b272ea0 | cpp | tensorflow/tensorflow | lock_free_queue | third_party/xla/xla/tsl/profiler/utils/lock_free_queue.h | third_party/xla/xla/tsl/profiler/utils/lock_free_queue_test.cc | #ifndef XLA_TSL_PROFILER_UTILS_LOCK_FREE_QUEUE_H_
#define XLA_TSL_PROFILER_UTILS_LOCK_FREE_QUEUE_H_
#include <stddef.h>
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <optional>
#include <utility>
#include "xla/tsl/profiler/utils/no_init.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
namespace tsl {
namespace profiler {
namespace QueueBaseInternal {
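// A fixed-size block of uninitialized slots. Blocks are chained into a singly
// linked list: `start` is the queue-wide index of the block's first slot and
// `next` points to the following block (nullptr for the tail). kNumSlots is
// chosen so that the whole block occupies roughly kBlockSize bytes.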
template <typename T, size_t kBlockSize>
struct InternalBlock {
static constexpr size_t kNumSlots =
(kBlockSize -
       (sizeof(size_t) + sizeof(InternalBlock*))) /
sizeof(NoInit<T>);
size_t start;
InternalBlock* next;
NoInit<T> slots[kNumSlots];
};
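// End-index wrapper: the plain variant is used by the single-threaded
// BlockedQueue, the atomic variant by LockFreeQueue so that the consumer can
// observe the end position published by the producer.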
template <bool kIsAtomic>
struct Index;
template <>
struct Index<false> {
size_t value;
explicit Index(size_t pos = 0) : value(pos) {}
size_t Get() const { return value; }
void Set(size_t pos) { value = pos; }
};
template <>
struct Index<true> {
std::atomic<size_t> value;
explicit Index(size_t pos = 0) : value(pos) {}
size_t Get() const { return value.load(std::memory_order_acquire); }
void Set(size_t pos) { value.store(pos, std::memory_order_release); }
};
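// Shared implementation for BlockedQueue and LockFreeQueue: elements are
// stored in a linked list of fixed-size blocks, pushed at `end_` and popped
// from `start_`. kAtomicEnd selects whether the end index uses
// acquire/release atomic accesses.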
template <typename T, size_t kBlockSize, bool kAtomicEnd>
class BlockedQueueBase {
using Block = InternalBlock<T, kBlockSize>;
public:
static constexpr size_t kNumSlotsPerBlockForTesting = Block::kNumSlots;
BlockedQueueBase()
: start_block_(new Block{0, nullptr}),
start_(start_block_->start),
end_block_(start_block_),
end_(end_block_->start) {}
~BlockedQueueBase() {
Clear();
DCHECK(Empty());
delete end_block_;
}
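  // Appends an element at the end, allocating a new tail block once the
  // current one fills up. In the lock-free variant this must only be called
  // from the single producer thread.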
void Push(T&& element) {
size_t end = End();
auto& slot = end_block_->slots[end++ - end_block_->start];
slot.Emplace(std::move(element));
if (TF_PREDICT_FALSE(end - end_block_->start == Block::kNumSlots)) {
auto* new_block = new Block{end, nullptr};
end_block_ = (end_block_->next = new_block);
}
SetEnd(end);
}
void Clear() {
size_t end = End();
while (start_ != end) {
PopImpl();
}
}
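  // Removes and returns the oldest element, or std::nullopt if no element is
  // visible at the time of the call.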
std::optional<T> Pop() {
std::optional<T> element;
size_t end = End();
if (start_ != end) {
element = PopImpl();
}
return element;
}
protected:
void SetEnd(size_t end) { end_.Set(end); }
size_t End() const { return end_.Get(); }
bool Empty() const { return (start_ == End()); }
T PopImpl() {
DCHECK(!Empty());
auto& slot = start_block_->slots[start_++ - start_block_->start];
T element = std::move(slot).Consume();
if (TF_PREDICT_FALSE(start_ - start_block_->start == Block::kNumSlots)) {
auto* old_block = std::exchange(start_block_, start_block_->next);
delete old_block;
DCHECK_EQ(start_, start_block_->start);
}
return element;
}
Block* start_block_;
size_t start_;
Block* end_block_;
Index<kAtomicEnd> end_;
};
}
template <typename T, size_t kBlockSize>
class LockFreeQueue;
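// Single-threaded variant: supports move construction/assignment and forward
// iteration, and is the container returned by LockFreeQueue::PopAll().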
template <typename T, size_t kBlockSize = 1 << 16>
class BlockedQueue final
: public QueueBaseInternal::BlockedQueueBase<T, kBlockSize, false> {
using Block = QueueBaseInternal::InternalBlock<T, kBlockSize>;
friend class LockFreeQueue<T, kBlockSize>;
public:
BlockedQueue() = default;
BlockedQueue(BlockedQueue&& src) { *this = std::move(src); }
BlockedQueue& operator=(BlockedQueue&& src) {
this->Clear();
std::swap(this->start_block_, src.start_block_);
std::swap(this->start_, src.start_);
std::swap(this->end_block_, src.end_block_);
auto origin_end = this->End();
this->SetEnd(src.End());
src.SetEnd(origin_end);
return *this;
}
class Iterator {
public:
bool operator==(const Iterator& another) const {
return (index_ == another.index_) && (queue_ == another.queue_);
}
bool operator!=(const Iterator& another) const {
return !(*this == another);
}
T& operator*() const {
DCHECK(block_ != nullptr);
DCHECK_GE(index_, block_->start);
DCHECK_LT(index_, block_->start + Block::kNumSlots);
DCHECK_LT(index_, queue_->End());
return block_->slots[index_ - block_->start].value;
}
T* operator->() const { return &(this->operator*()); }
Iterator& operator++() {
DCHECK(queue_ != nullptr);
DCHECK(block_ != nullptr);
if (index_ < queue_->End()) {
++index_;
auto next_block_start = block_->start + Block::kNumSlots;
DCHECK_LE(index_, next_block_start);
if (index_ == next_block_start) {
block_ = block_->next;
DCHECK_NE(block_, nullptr);
}
}
return (*this);
}
Iterator operator++(int) {
auto temp(*this);
this->operator++();
return temp;
}
private:
friend class BlockedQueue;
Iterator(BlockedQueue* queue, BlockedQueue::Block* block, size_t index)
: queue_(queue), block_(block), index_(index) {};
BlockedQueue* queue_ = nullptr;
BlockedQueue::Block* block_ = nullptr;
size_t index_ = 0;
};
Iterator begin() { return Iterator(this, this->start_block_, this->start_); }
Iterator end() { return Iterator(this, this->end_block_, this->End()); }
};
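// Single-producer single-consumer queue: Push() runs on the producer thread
// while Pop()/PopAll() run on the consumer thread. New elements are published
// to the consumer through the release/acquire atomic end index.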
template <typename T, size_t kBlockSize = 1 << 16>
class LockFreeQueue final
: public QueueBaseInternal::BlockedQueueBase<T, kBlockSize, true> {
using Block = QueueBaseInternal::InternalBlock<T, kBlockSize>;
public:
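  // Moves every element currently in the queue into a BlockedQueue that the
  // caller can iterate or pop without further synchronization. Fully consumed
  // blocks are re-linked wholesale; elements remaining in the partially
  // filled tail block are moved one by one into a fresh block.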
BlockedQueue<T, kBlockSize> PopAll() {
BlockedQueue<T, kBlockSize> result;
auto* empty_block = result.start_block_;
result.start_block_ = result.end_block_ = nullptr;
result.start_ = this->start_;
size_t end = this->End();
result.SetEnd(end);
while (this->start_block_->start + Block::kNumSlots <= end) {
auto* old_block =
std::exchange(this->start_block_, this->start_block_->next);
this->start_ = this->start_block_->start;
old_block->next = nullptr;
if (result.end_block_) {
result.end_block_->next = old_block;
} else {
result.start_block_ = old_block;
}
result.end_block_ = old_block;
}
empty_block->start = this->start_block_->start;
if (result.end_block_ == nullptr) {
result.end_block_ = result.start_block_ = empty_block;
} else {
result.end_block_->next = empty_block;
result.end_block_ = empty_block;
}
size_t bs = this->start_block_->start;
for (size_t i = std::max(this->start_, bs); i < end; i++) {
auto& src_slot = this->start_block_->slots[i - bs];
auto& dst_slot = result.end_block_->slots[i - bs];
dst_slot.Emplace(std::move(src_slot).Consume());
}
this->start_ = end;
return result;
}
};
}
}
#endif | #include "xla/tsl/profiler/utils/lock_free_queue.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/synchronization/notification.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
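// Consumer loop: keeps popping events into `result` until `stopped` is
// notified, then drains whatever remains in the queue.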
template <typename T, size_t block_size_in_bytes>
void RetriveEvents(LockFreeQueue<T, block_size_in_bytes>& queue,
absl::Notification& stopped, std::vector<T>& result) {
result.clear();
do {
while (auto event = queue.Pop()) {
result.emplace_back(*event);
}
} while (!stopped.HasBeenNotified());
while (auto event = queue.Pop()) {
result.emplace_back(*event);
}
}
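// Pushes `event_count1` generated events (recorded in `expected1`), signals
// `stage1_filled`, waits for `stage1_grabbed`, then pushes `event_count2`
// further events (recorded in `expected2`) and signals `stage2_filled`.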
template <typename T, size_t block_size_in_bytes, typename Generator>
void FillEvents2Stage(LockFreeQueue<T, block_size_in_bytes>& queue,
Generator gen, size_t event_count1, size_t event_count2,
absl::Notification& stage1_filled,
absl::Notification& stage1_grabbed,
absl::Notification& stage2_filled,
std::vector<T>& expected1, std::vector<T>& expected2) {
expected1.clear();
expected2.clear();
for (size_t i = 0; i < event_count1; ++i) {
T event = gen(i);
expected1.emplace_back(event);
queue.Push(std::move(event));
}
stage1_filled.Notify();
stage1_grabbed.WaitForNotification();
for (size_t i = 0; i < event_count2; ++i) {
T event = gen(i + event_count1);
expected2.emplace_back(event);
queue.Push(std::move(event));
}
stage2_filled.Notify();
}
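// Runs one producer thread against two consecutive consumer threads and
// checks that each consumer sees exactly the events pushed during its stage.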
template <typename T, size_t block_size_in_bytes, typename Generator>
void TestProducerConsumer(size_t event_count1, size_t event_count2,
Generator gen) {
LockFreeQueue<T, block_size_in_bytes> queue;
std::vector<T> expected1;
std::vector<T> expected2;
absl::Notification stage1_filled;
absl::Notification stage1_grabbed;
absl::Notification stage2_filled;
auto producer = absl::WrapUnique(Env::Default()->StartThread(
ThreadOptions(), "producer", [&, gen, event_count1, event_count2]() {
FillEvents2Stage(queue, gen, event_count1, event_count2, stage1_filled,
stage1_grabbed, stage2_filled, expected1, expected2);
}));
std::vector<T> result1;
auto consumer1 = absl::WrapUnique(Env::Default()->StartThread(
ThreadOptions(), "consumer1", [&queue, &result1, &stage1_filled]() {
RetriveEvents(queue, stage1_filled, result1);
}));
consumer1.reset();
EXPECT_THAT(result1, ::testing::ContainerEq(expected1));
stage1_grabbed.Notify();
std::vector<T> result2;
auto consumer2 = absl::WrapUnique(Env::Default()->StartThread(
ThreadOptions(), "consumer2", [&queue, &result2, &stage2_filled]() {
RetriveEvents(queue, stage2_filled, result2);
}));
consumer2.reset();
EXPECT_THAT(result2, ::testing::ContainerEq(expected2));
producer.reset();
}
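// Fills the queue in two stages and verifies that PopAll() returns exactly
// the events of each stage, consumed once via Pop() and once via iteration.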
template <typename T, size_t block_size_in_bytes, typename Generator>
void TestPopAll(size_t event_count1, size_t event_count2, Generator gen) {
using TLockFreeQueue = LockFreeQueue<T, block_size_in_bytes>;
using TBlockedQueue = BlockedQueue<T, block_size_in_bytes>;
TLockFreeQueue queue;
std::vector<T> expected1;
std::vector<T> expected2;
absl::Notification stage1_filled;
absl::Notification stage1_grabbed;
absl::Notification stage2_filled;
auto producer = absl::WrapUnique(Env::Default()->StartThread(
ThreadOptions(), "producer", [&, gen, event_count1, event_count2]() {
FillEvents2Stage(queue, gen, event_count1, event_count2, stage1_filled,
stage1_grabbed, stage2_filled, expected1, expected2);
}));
stage1_filled.WaitForNotification();
TBlockedQueue dumped_queue1 = queue.PopAll();
std::vector<T> result1;
while (auto event = dumped_queue1.Pop()) {
result1.emplace_back(*event);
}
EXPECT_THAT(result1, ::testing::ContainerEq(expected1));
stage1_grabbed.Notify();
producer.reset();
TBlockedQueue dumped_queue2 = queue.PopAll();
std::vector<T> result2;
for (auto it = dumped_queue2.begin(), ite = dumped_queue2.end(); it != ite;
++it) {
result2.emplace_back(*it);
}
EXPECT_THAT(result2, ::testing::ContainerEq(expected2));
}
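// Exercises BlockedQueue directly: the first batch is consumed with Pop(),
// the second batch is read back through begin()/end() iteration.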
template <typename T, size_t block_size_in_bytes, typename Generator>
void TestIterator(size_t event_count1, size_t event_count2, Generator gen) {
BlockedQueue<T, block_size_in_bytes> queue;
std::vector<T> expected1;
for (size_t i = 0; i < event_count1; i++) {
queue.Push(gen(i));
expected1.emplace_back(gen(i));
}
std::vector<T> result1;
while (auto event = queue.Pop()) {
result1.emplace_back(*event);
}
EXPECT_THAT(result1, ::testing::ContainerEq(expected1));
std::vector<T> expected2;
for (size_t i = 0; i < event_count2; i++) {
queue.Push(gen(i + event_count1));
expected2.emplace_back(gen(i + event_count1));
}
std::vector<T> result2;
for (auto it = queue.begin(), ite = queue.end(); it != ite; ++it) {
result2.emplace_back(*it);
}
EXPECT_THAT(result2, ::testing::ContainerEq(expected2));
}
TEST(LockFreeQueueTest, Int64Event_ProducerConsumer) {
auto gen = [](size_t i) -> int64_t { return static_cast<int64_t>(i); };
using T = decltype(gen(0));
constexpr size_t kBS = 512;
using G = decltype(gen);
constexpr size_t kNumSlots =
LockFreeQueue<T, kBS>::kNumSlotsPerBlockForTesting;
EXPECT_GE(kNumSlots, 10);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, 3, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, 5, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, kNumSlots + 3, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, kNumSlots * 2 + 5, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, kNumSlots - 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, kNumSlots, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, kNumSlots + 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2, kNumSlots + 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 3, kNumSlots - 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, kNumSlots - 4, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, kNumSlots - 3, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, kNumSlots - 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots - 5, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots + 3, gen);
}
TEST(LockFreeQueueTest, StringEvent_ProducerConsumer) {
auto gen = [](size_t i) { return std::to_string(i); };
using T = decltype(gen(0));
constexpr size_t kBS = 512;
using G = decltype(gen);
constexpr size_t kNumSlots =
LockFreeQueue<T, kBS>::kNumSlotsPerBlockForTesting;
EXPECT_GE(kNumSlots, 10);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, 3, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, 5, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, kNumSlots + 3, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots - 3, kNumSlots * 2 + 5, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, kNumSlots - 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, kNumSlots, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots, kNumSlots + 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2, kNumSlots + 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 3, kNumSlots - 1, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, kNumSlots - 4, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, kNumSlots - 3, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots + 3, kNumSlots - 2, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots - 5, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots, gen);
TestProducerConsumer<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots + 3, gen);
}
TEST(LockFreeQueueTest, Int64Event_PopAll) {
auto gen = [](size_t i) -> int64_t { return static_cast<int64_t>(i); };
using T = decltype(gen(0));
constexpr size_t kBS = 512;
using G = decltype(gen);
constexpr size_t kNumSlots =
LockFreeQueue<T, kBS>::kNumSlotsPerBlockForTesting;
EXPECT_GE(kNumSlots, 10);
TestPopAll<T, kBS, G>(kNumSlots - 3, 2, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, 3, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, 5, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, kNumSlots + 3, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, kNumSlots * 2 + 5, gen);
TestPopAll<T, kBS, G>(kNumSlots, 2, gen);
TestPopAll<T, kBS, G>(kNumSlots, kNumSlots - 1, gen);
TestPopAll<T, kBS, G>(kNumSlots, kNumSlots, gen);
TestPopAll<T, kBS, G>(kNumSlots, kNumSlots + 1, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2, kNumSlots + 1, gen);
TestPopAll<T, kBS, G>(kNumSlots * 3, kNumSlots - 1, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, 2, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, kNumSlots - 4, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, kNumSlots - 3, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, kNumSlots - 2, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots - 5, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots + 3, gen);
}
TEST(LockFreeQueueTest, StringEvent_PopAll) {
auto gen = [](size_t i) -> std::string { return std::to_string(i); };
using T = decltype(gen(0));
constexpr size_t kBS = 512;
using G = decltype(gen);
constexpr size_t kNumSlots =
LockFreeQueue<T, kBS>::kNumSlotsPerBlockForTesting;
EXPECT_GE(kNumSlots, 10);
TestPopAll<T, kBS, G>(kNumSlots - 3, 2, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, 3, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, 5, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, kNumSlots + 3, gen);
TestPopAll<T, kBS, G>(kNumSlots - 3, kNumSlots * 2 + 5, gen);
TestPopAll<T, kBS, G>(kNumSlots, 2, gen);
TestPopAll<T, kBS, G>(kNumSlots, kNumSlots - 1, gen);
TestPopAll<T, kBS, G>(kNumSlots, kNumSlots, gen);
TestPopAll<T, kBS, G>(kNumSlots, kNumSlots + 1, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2, kNumSlots + 1, gen);
TestPopAll<T, kBS, G>(kNumSlots * 3, kNumSlots - 1, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, 2, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, kNumSlots - 4, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, kNumSlots - 3, gen);
TestPopAll<T, kBS, G>(kNumSlots + 3, kNumSlots - 2, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots - 5, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots, gen);
TestPopAll<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots + 3, gen);
}
TEST(LockFreeQueueTest, Int64Event_Iterator) {
auto gen = [](size_t i) -> int64_t { return static_cast<int64_t>(i); };
using T = decltype(gen(0));
constexpr size_t kBS = 512;
using G = decltype(gen);
constexpr size_t kNumSlots =
LockFreeQueue<T, kBS>::kNumSlotsPerBlockForTesting;
EXPECT_GE(kNumSlots, 10);
TestIterator<T, kBS, G>(kNumSlots - 3, 2, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, 3, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, 5, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, kNumSlots + 3, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, kNumSlots * 2 + 5, gen);
TestIterator<T, kBS, G>(kNumSlots, 2, gen);
TestIterator<T, kBS, G>(kNumSlots, kNumSlots - 1, gen);
TestIterator<T, kBS, G>(kNumSlots, kNumSlots, gen);
TestIterator<T, kBS, G>(kNumSlots, kNumSlots + 1, gen);
TestIterator<T, kBS, G>(kNumSlots * 2, kNumSlots + 1, gen);
TestIterator<T, kBS, G>(kNumSlots * 3, kNumSlots - 1, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, 2, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, kNumSlots - 4, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, kNumSlots - 3, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, kNumSlots - 2, gen);
TestIterator<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots - 5, gen);
TestIterator<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots, gen);
TestIterator<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots + 3, gen);
}
TEST(LockFreeQueueTest, StringEvent_Iterator) {
auto gen = [](size_t i) { return std::to_string(i); };
using T = decltype(gen(0));
constexpr size_t kBS = 512;
using G = decltype(gen);
constexpr size_t kNumSlots =
LockFreeQueue<T, kBS>::kNumSlotsPerBlockForTesting;
EXPECT_GE(kNumSlots, 10);
TestIterator<T, kBS, G>(kNumSlots - 3, 2, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, 3, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, 5, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, kNumSlots + 3, gen);
TestIterator<T, kBS, G>(kNumSlots - 3, kNumSlots * 2 + 5, gen);
TestIterator<T, kBS, G>(kNumSlots, 2, gen);
TestIterator<T, kBS, G>(kNumSlots, kNumSlots - 1, gen);
TestIterator<T, kBS, G>(kNumSlots, kNumSlots, gen);
TestIterator<T, kBS, G>(kNumSlots, kNumSlots + 1, gen);
TestIterator<T, kBS, G>(kNumSlots * 2, kNumSlots + 1, gen);
TestIterator<T, kBS, G>(kNumSlots * 3, kNumSlots - 1, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, 2, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, kNumSlots - 4, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, kNumSlots - 3, gen);
TestIterator<T, kBS, G>(kNumSlots + 3, kNumSlots - 2, gen);
TestIterator<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots - 5, gen);
TestIterator<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots, gen);
TestIterator<T, kBS, G>(kNumSlots * 2 + 3, kNumSlots + 3, gen);
}
TEST(LockFreeQueueTest, Iterator_Basics) {
BlockedQueue<int32_t, 512> queue;
auto it = queue.begin();
EXPECT_EQ(it, queue.end());
EXPECT_EQ(++it, queue.end());
queue.Push(1);
it = queue.begin();
EXPECT_NE(it, queue.end());
++it;
EXPECT_EQ(it, queue.end());
it = queue.begin();
auto it2 = it++;
EXPECT_NE(it2, queue.end());
EXPECT_EQ(it, queue.end());
it2 = it++;
EXPECT_EQ(it2, queue.end());
EXPECT_EQ(it, queue.end());
queue.Push(2);
queue.Pop();
it = queue.begin();
EXPECT_NE(it, queue.end());
++it;
EXPECT_EQ(it, queue.end());
it = queue.begin();
it2 = it++;
EXPECT_NE(it2, queue.end());
EXPECT_EQ(it, queue.end());
it2 = it++;
EXPECT_EQ(it2, queue.end());
EXPECT_EQ(it, queue.end());
BlockedQueue<std::string, 512> str_queue;
str_queue.Push("abcd");
auto str_it = str_queue.begin();
EXPECT_EQ(*str_it, std::string("abcd"));
EXPECT_EQ(str_it->size(), 4);
str_queue.Push("123456");
str_it++;
EXPECT_EQ(*str_it, std::string("123456"));
EXPECT_EQ(str_it->size(), 6);
str_it++;
EXPECT_EQ(str_it, str_queue.end());
const auto const_str_it = str_queue.begin();
EXPECT_EQ(*const_str_it, std::string("abcd"));
EXPECT_EQ(const_str_it->size(), 4);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/lock_free_queue.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/lock_free_queue_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1e32686d-4ea4-44d1-9ca4-98c76bc91348 | cpp | tensorflow/tensorflow | per_thread | third_party/xla/xla/tsl/profiler/utils/per_thread.h | third_party/xla/xla/tsl/profiler/utils/per_thread_test.cc | #ifndef XLA_TSL_PROFILER_UTILS_PER_THREAD_H_
#define XLA_TSL_PROFILER_UTILS_PER_THREAD_H_
#include <memory>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/synchronization/mutex.h"
namespace tsl {
namespace profiler {
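// Thread-local storage of a T that can be snapshotted across threads while a
// recording is active: Get() returns the calling thread's instance, and
// StartRecording()/StopRecording() return the instances of all registered
// threads (including, at stop time, threads that exited during recording).
//
// Hedged usage sketch -- `ThreadStats` and its `num_events` member are
// illustrative assumptions, not part of this header:
//
//   PerThread<ThreadStats>::StartRecording();
//   PerThread<ThreadStats>::Get().num_events++;  // on any worker thread
//   auto stats = PerThread<ThreadStats>::StopRecording();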
template <typename T>
class PerThread {
public:
static T& Get() {
static thread_local ThreadLocalPtr thread;
return thread.Get();
}
static std::vector<std::shared_ptr<T>> StartRecording() {
return Registry::Get().StartRecording();
}
static std::vector<std::shared_ptr<T>> StopRecording() {
return Registry::Get().StopRecording();
}
private:
PerThread() = delete;
~PerThread() = delete;
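  // Process-wide registry mapping each thread's data to a flag recording
  // whether the owning thread is still alive; entries for threads that exit
  // outside of a recording are dropped eagerly, the rest are pruned by
  // StopRecording().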
class Registry {
public:
static Registry& Get() {
static Registry* singleton = new Registry();
return *singleton;
}
std::vector<std::shared_ptr<T>> StartRecording() {
std::vector<std::shared_ptr<T>> threads;
absl::MutexLock lock(&mutex_);
threads.reserve(threads_.size());
for (auto iter = threads_.begin(); iter != threads_.end(); ++iter) {
threads.push_back(iter->first);
}
recording_ = true;
return threads;
}
std::vector<std::shared_ptr<T>> StopRecording() {
std::vector<std::shared_ptr<T>> threads;
absl::MutexLock lock(&mutex_);
threads.reserve(threads_.size());
for (auto iter = threads_.begin(); iter != threads_.end();) {
if (!iter->second) {
threads.push_back(std::move(iter->first));
threads_.erase(iter++);
} else {
threads.push_back(iter->first);
++iter;
}
}
recording_ = false;
return threads;
}
void Register(std::shared_ptr<T> thread) {
absl::MutexLock lock(&mutex_);
threads_.insert_or_assign(std::move(thread), true);
}
void Unregister(const std::shared_ptr<T>& thread) {
absl::MutexLock lock(&mutex_);
if (!recording_) {
threads_.erase(thread);
} else {
if (auto it = threads_.find(thread); it != threads_.end()) {
it->second = false;
}
}
}
private:
Registry() = default;
Registry(const Registry&) = delete;
void operator=(const Registry&) = delete;
absl::Mutex mutex_;
absl::flat_hash_map<std::shared_ptr<T>, bool> threads_
ABSL_GUARDED_BY(mutex_);
bool recording_ ABSL_GUARDED_BY(mutex_) = false;
};
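  // RAII wrapper held in a thread_local: registers the per-thread data on
  // first use and unregisters it when the thread exits.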
class ThreadLocalPtr {
public:
ThreadLocalPtr() : ptr_(std::make_shared<T>()) {
Registry::Get().Register(ptr_);
}
~ThreadLocalPtr() { Registry::Get().Unregister(ptr_); }
T& Get() { return *ptr_; }
private:
std::shared_ptr<T> ptr_;
};
};
}
}
#endif | #include "xla/tsl/profiler/utils/per_thread.h"
#include <cstdint>
#include <list>
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/notification.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
enum ProfilingStage {
kBeforeT1 = 1,
kDuringT1T2 = 2,
kAfterT2 = 3,
kNever = 4
};
struct ThreadSyncControl {
ThreadSyncControl()
: could_start_profiling_1(4),
could_stop_profiling_1(6),
could_exit_all(6) {}
absl::Notification profiling_1_started;
absl::Notification profiling_1_stopped;
absl::Notification exiting_all;
absl::BlockingCounter could_start_profiling_1;
absl::BlockingCounter could_stop_profiling_1;
absl::BlockingCounter could_exit_all;
};
static ThreadSyncControl& GetSyncContols() {
static ThreadSyncControl* control = new ThreadSyncControl();
return *control;
}
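// Simulated worker thread: touches its PerThread slot at `firstUseStage` and
// returns at `exitStage`, synchronizing with the test via GetSyncContols().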
void ThreadMain(int32_t id, ProfilingStage firstUseStage,
ProfilingStage exitStage) {
if (firstUseStage == kBeforeT1) {
auto& td = PerThread<int32_t>::Get();
td = id;
GetSyncContols().could_start_profiling_1.DecrementCount();
}
if (exitStage == kBeforeT1) {
return;
}
GetSyncContols().profiling_1_started.WaitForNotification();
if (firstUseStage == kDuringT1T2) {
auto& td = PerThread<int32_t>::Get();
td = id;
GetSyncContols().could_stop_profiling_1.DecrementCount();
}
if (exitStage == kDuringT1T2) {
return;
}
GetSyncContols().profiling_1_stopped.WaitForNotification();
if (firstUseStage == kAfterT2) {
auto& td = PerThread<int32_t>::Get();
td = id;
GetSyncContols().could_exit_all.DecrementCount();
}
if (exitStage == kAfterT2) {
return;
}
GetSyncContols().exiting_all.WaitForNotification();
}
class ThreadFactory {
public:
ThreadFactory() : threads_existing_at_(kNever + 1) {}
void Start(int32_t id, ProfilingStage firstUseStage,
ProfilingStage exitStage) {
std::string name = absl::StrCat("thread_", id);
threads_existing_at_[exitStage].emplace_back(absl::WrapUnique(
Env::Default()->StartThread(ThreadOptions(), name, [=]() {
ThreadMain(id, firstUseStage, exitStage);
})));
}
void StopAllAt(ProfilingStage exitStage) {
threads_existing_at_[exitStage].clear();
}
private:
std::vector<std::list<std::unique_ptr<tsl::Thread>>> threads_existing_at_;
};
using ::testing::ElementsAre;
using ::testing::WhenSorted;
TEST(PerThreadRecordingTest, Lifecycles) {
auto get_ids = [](std::vector<std::shared_ptr<int32_t>>& threads_data) {
std::vector<int> threads_values;
for (const auto& ptd : threads_data) {
threads_values.push_back(*ptd);
}
return threads_values;
};
ThreadFactory thread_factory;
auto threads_data = PerThread<int32_t>::StartRecording();
auto threads_values = get_ids(threads_data);
EXPECT_THAT(threads_values, ::testing::SizeIs(0));
thread_factory.Start(111, kBeforeT1, kBeforeT1);
thread_factory.Start(112, kBeforeT1, kDuringT1T2);
thread_factory.Start(113, kBeforeT1, kAfterT2);
thread_factory.Start(114, kBeforeT1, kNever);
thread_factory.Start(122, kDuringT1T2, kDuringT1T2);
thread_factory.Start(123, kDuringT1T2, kAfterT2);
thread_factory.Start(124, kDuringT1T2, kNever);
thread_factory.Start(133, kAfterT2, kAfterT2);
thread_factory.Start(134, kAfterT2, kNever);
thread_factory.Start(141, kNever, kBeforeT1);
thread_factory.Start(142, kNever, kDuringT1T2);
thread_factory.Start(143, kNever, kAfterT2);
thread_factory.Start(144, kNever, kNever);
GetSyncContols().could_start_profiling_1.Wait();
thread_factory.StopAllAt(kBeforeT1);
threads_data = PerThread<int32_t>::StopRecording();
threads_values = get_ids(threads_data);
EXPECT_THAT(threads_values, WhenSorted(ElementsAre(111, 112, 113, 114)));
threads_data = PerThread<int32_t>::StartRecording();
threads_values = get_ids(threads_data);
EXPECT_THAT(threads_values, WhenSorted(ElementsAre(112, 113, 114)));
GetSyncContols().profiling_1_started.Notify();
thread_factory.Start(222, kDuringT1T2, kDuringT1T2);
thread_factory.Start(223, kDuringT1T2, kAfterT2);
thread_factory.Start(224, kDuringT1T2, kNever);
thread_factory.Start(233, kAfterT2, kAfterT2);
thread_factory.Start(234, kAfterT2, kNever);
thread_factory.Start(242, kNever, kDuringT1T2);
thread_factory.Start(243, kNever, kAfterT2);
thread_factory.Start(244, kNever, kNever);
GetSyncContols().could_stop_profiling_1.Wait();
thread_factory.StopAllAt(kDuringT1T2);
threads_data = PerThread<int32_t>::StopRecording();
threads_values = get_ids(threads_data);
EXPECT_THAT(threads_values, WhenSorted(ElementsAre(112, 113, 114, 122, 123,
124, 222, 223, 224)));
threads_data = PerThread<int32_t>::StartRecording();
threads_values = get_ids(threads_data);
EXPECT_THAT(threads_values,
WhenSorted(ElementsAre(113, 114, 123, 124, 223, 224)));
GetSyncContols().profiling_1_stopped.Notify();
thread_factory.Start(333, kAfterT2, kAfterT2);
thread_factory.Start(334, kAfterT2, kNever);
thread_factory.Start(343, kNever, kAfterT2);
thread_factory.Start(344, kNever, kNever);
GetSyncContols().could_exit_all.Wait();
thread_factory.StopAllAt(kAfterT2);
threads_data = PerThread<int32_t>::StopRecording();
threads_values = get_ids(threads_data);
EXPECT_THAT(threads_values,
WhenSorted(ElementsAre(113, 114, 123, 124, 133, 134, 223, 224,
233, 234, 333, 334)));
GetSyncContols().exiting_all.Notify();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/per_thread.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/per_thread_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0b4ecda2-0703-4893-9d1d-fe839e027d1e | cpp | tensorflow/tensorflow | timespan | third_party/xla/xla/tsl/profiler/utils/timespan.h | third_party/xla/xla/tsl/profiler/utils/timespan_test.cc | #ifndef XLA_TSL_PROFILER_UTILS_TIMESPAN_H_
#define XLA_TSL_PROFILER_UTILS_TIMESPAN_H_
#include <algorithm>
#include <string>
#include "absl/strings/str_cat.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace profiler {
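// A time interval measured in picoseconds, stored as a begin time plus a
// non-negative duration; an instant is a span with zero duration.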
class Timespan {
public:
static Timespan FromEndPoints(uint64 begin_ps, uint64 end_ps) {
if (begin_ps > end_ps) {
return Timespan(begin_ps, 0);
}
return Timespan(begin_ps, end_ps - begin_ps);
}
explicit Timespan(uint64 begin_ps = 0, uint64 duration_ps = 0)
: begin_ps_(begin_ps), duration_ps_(duration_ps) {}
uint64 begin_ps() const { return begin_ps_; }
uint64 middle_ps() const { return begin_ps_ + duration_ps_ / 2; }
uint64 end_ps() const { return begin_ps_ + duration_ps_; }
uint64 duration_ps() const { return duration_ps_; }
bool Instant() const { return duration_ps() == 0; }
bool Empty() const { return begin_ps() == 0 && duration_ps() == 0; }
bool Overlaps(const Timespan& other) const {
return begin_ps() <= other.end_ps() && other.begin_ps() <= end_ps();
}
bool Includes(const Timespan& other) const {
return begin_ps() <= other.begin_ps() && other.end_ps() <= end_ps();
}
bool Includes(uint64 time_ps) const { return Includes(Timespan(time_ps)); }
uint64 OverlappedDurationPs(const Timespan& other) const {
if (!Overlaps(other)) return 0;
return std::min(end_ps(), other.end_ps()) -
std::max(begin_ps(), other.begin_ps());
}
void ExpandToInclude(const Timespan& other) {
*this = FromEndPoints(std::min(begin_ps(), other.begin_ps()),
std::max(end_ps(), other.end_ps()));
}
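  // Orders spans by begin time; ties are broken so that the longer
  // (enclosing) span sorts first.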
bool operator<(const Timespan& other) const {
if (begin_ps_ < other.begin_ps_) return true;
if (begin_ps_ > other.begin_ps_) return false;
return duration_ps_ > other.duration_ps_;
}
bool operator==(const Timespan& other) const {
return begin_ps_ == other.begin_ps_ && duration_ps_ == other.duration_ps_;
}
std::string DebugString() const {
return absl::StrCat("[", begin_ps(), ", ", end_ps(), "]");
}
static bool ByDuration(const Timespan& a, const Timespan& b) {
if (a.duration_ps_ < b.duration_ps_) return true;
if (a.duration_ps_ > b.duration_ps_) return false;
return a.begin_ps_ < b.begin_ps_;
}
private:
uint64 begin_ps_;
uint64 duration_ps_;
};
inline Timespan PicoSpan(uint64 start_ps, uint64 end_ps) {
return Timespan::FromEndPoints(start_ps, end_ps);
}
inline Timespan MilliSpan(double start_ms, double end_ms) {
return PicoSpan(MilliToPico(start_ms), MilliToPico(end_ms));
}
}
}
#endif | #include "xla/tsl/profiler/utils/timespan.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
TEST(TimespanTests, NonInstantSpanIncludesSingleTimeTests) {
EXPECT_TRUE(Timespan(10, 2).Includes(12));
EXPECT_TRUE(Timespan(12, 1).Includes(12));
}
TEST(TimespanTests, NonInstantSpanIncludesInstantSpanTests) {
EXPECT_TRUE(Timespan(10, 2).Includes(Timespan(10, 0)));
EXPECT_TRUE(Timespan(10, 2).Includes(Timespan(12, 0)));
}
TEST(TimespanTests, NonInstantSpanIncludesNonInstantSpanTests) {
EXPECT_TRUE(Timespan(10, 5).Includes(Timespan(10, 4)));
EXPECT_TRUE(Timespan(10, 5).Includes(Timespan(10, 5)));
EXPECT_FALSE(Timespan(10, 5).Includes(Timespan(10, 6)));
}
TEST(TimespanTests, InstantSpanIncludesSingleTimeTests) {
EXPECT_TRUE(Timespan(10, 0).Includes(10));
EXPECT_FALSE(Timespan(10, 0).Includes(9));
}
TEST(TimespanTests, InstantSpanIncludesInstantSpanTests) {
EXPECT_TRUE(Timespan(10, 0).Includes(Timespan(10, 0)));
EXPECT_FALSE(Timespan(10, 0).Includes(Timespan(8, 0)));
}
TEST(TimespanTests, InstantSpanIncludesNonInstantSpanTests) {
EXPECT_FALSE(Timespan(10, 0).Includes(Timespan(10, 1)));
EXPECT_FALSE(Timespan(12, 0).Includes(Timespan(9, 100)));
}
TEST(TimespanTests, NonInstantSpanInstantSpanOverlappedDuration) {
EXPECT_EQ(0, Timespan(12, 2).OverlappedDurationPs(Timespan(8, 0)));
EXPECT_EQ(0, Timespan(12, 2).OverlappedDurationPs(Timespan(13, 0)));
EXPECT_EQ(0, Timespan(12, 2).OverlappedDurationPs(Timespan(14, 0)));
}
TEST(TimespanTests, NonInstantSpanNonInstantSpanOverlappedDuration) {
EXPECT_EQ(0, Timespan(12, 2).OverlappedDurationPs(Timespan(9, 3)));
EXPECT_EQ(1, Timespan(12, 2).OverlappedDurationPs(Timespan(9, 4)));
EXPECT_EQ(2, Timespan(12, 2).OverlappedDurationPs(Timespan(9, 5)));
EXPECT_EQ(2, Timespan(12, 2).OverlappedDurationPs(Timespan(9, 6)));
EXPECT_EQ(1, Timespan(12, 2).OverlappedDurationPs(Timespan(13, 1)));
EXPECT_EQ(1, Timespan(12, 2).OverlappedDurationPs(Timespan(13, 2)));
EXPECT_EQ(0, Timespan(12, 2).OverlappedDurationPs(Timespan(14, 1)));
EXPECT_EQ(0, Timespan(12, 2).OverlappedDurationPs(Timespan(14, 2)));
EXPECT_EQ(2, Timespan(12, 5).OverlappedDurationPs(Timespan(13, 2)));
EXPECT_EQ(2, Timespan(12, 2).OverlappedDurationPs(Timespan(12, 2)));
}
TEST(TimespanTests, InstantSpanInstantSpanOverlappedDuration) {
EXPECT_EQ(0, Timespan(12, 0).OverlappedDurationPs(Timespan(9, 0)));
EXPECT_EQ(0, Timespan(12, 0).OverlappedDurationPs(Timespan(12, 0)));
}
TEST(TimespanTests, InstantSpanNonInstantSpanOverlappedDuration) {
EXPECT_EQ(0, Timespan(12, 0).OverlappedDurationPs(Timespan(8, 3)));
EXPECT_EQ(0, Timespan(12, 0).OverlappedDurationPs(Timespan(8, 16)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/timespan.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/timespan_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
97fd9238-bf9e-4cb6-a0bf-0dc0fcda564c | cpp | tensorflow/tensorflow | xla_op_utils | third_party/xla/xla/tsl/profiler/convert/xla_op_utils.h | third_party/xla/xla/tsl/profiler/convert/xla_op_utils_test.cc | #ifndef XLA_TSL_PROFILER_CONVERT_XLA_OP_UTILS_H_
#define XLA_TSL_PROFILER_CONVERT_XLA_OP_UTILS_H_
#include <string>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace tsl {
namespace profiler {
inline bool IsFusion(absl::string_view category) {
return absl::EndsWith(category, " fusion");
}
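// Returns "<hlo_module_name>(<program_id>)".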
inline std::string HloModuleNameWithProgramId(absl::string_view hlo_module_name,
uint64_t program_id) {
return absl::StrCat(hlo_module_name, "(", program_id, ")");
}
}
}
#endif | #include "xla/tsl/profiler/convert/xla_op_utils.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
TEST(XlaOpUtilsTest, HloModuleNameWithProgramId) {
EXPECT_EQ("module(123)", HloModuleNameWithProgramId("module", 123));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/convert/xla_op_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/convert/xla_op_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c282ff11-8b1f-4ce2-90b0-556c13f12219 | cpp | tensorflow/tensorflow | concurrency | third_party/xla/xla/backends/cpu/runtime/concurrency.h | third_party/xla/xla/backends/cpu/runtime/concurrency_test.cc | #ifndef XLA_BACKENDS_CPU_RUNTIME_CONCURRENCY_H_
#define XLA_BACKENDS_CPU_RUNTIME_CONCURRENCY_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <type_traits>
#include "tsl/platform/logging.h"
#define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"
#include "unsupported/Eigen/CXX11/ThreadPool"
namespace xla::cpu {
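// Schedules f(0), ..., f(n-1) onto `intra_op_threadpool` via recursive binary
// splitting: each task forks the upper half of its index range back onto the
// pool and keeps the lower half, so the calling thread ends up running f(0)
// inline. Completion is not awaited here; the caller must synchronize, e.g.
// with an absl::BlockingCounter.
//
// Minimal usage sketch (the device set-up, `out`, and `Compute` are
// assumptions for illustration only):
//
//   absl::BlockingCounter done(n);
//   ScheduleAll(&device, n, [&](int64_t i) {
//     out[i] = Compute(i);
//     done.DecrementCount();
//   });
//   done.Wait();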
template <typename F,
std::enable_if_t<std::is_invocable_v<F, int64_t>>* = nullptr>
void ScheduleAll(const Eigen::ThreadPoolDevice* intra_op_threadpool, int64_t n,
F&& f) {
DCHECK(n >= 0) << "n must be non-negative";
if (n == 0) return;
if (n == 1) {
f(0);
return;
}
struct State {
State(const Eigen::ThreadPoolDevice* intra_op_threadpool, F&& f)
: intra_op_threadpool(intra_op_threadpool), f(std::forward<F>(f)) {}
void Execute(std::shared_ptr<State> self, int64_t start, int64_t end) {
while (end - start > 1) {
uint64_t mid = (start + end) / 2;
intra_op_threadpool->getPool()->Schedule(
std::bind(&State::Execute, this, self, mid, end));
end = mid;
}
f(start);
}
const Eigen::ThreadPoolDevice* intra_op_threadpool;
F f;
};
auto s = std::make_shared<State>(intra_op_threadpool, std::forward<F>(f));
s->Execute(std::move(s), 0, n);
}
}
#endif | #include "xla/backends/cpu/runtime/concurrency.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/synchronization/blocking_counter.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla::cpu {
namespace {
TEST(ConcurrencyTest, ScheduleAll) {
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "test", 10);
std::vector<int64_t> tasks(64, 0);
Eigen::ThreadPoolDevice device(thread_pool.AsEigenThreadPool(),
thread_pool.NumThreads());
absl::BlockingCounter counter(64);
ScheduleAll(&device, 64, [&](int64_t index) {
tasks[index] += 1;
counter.DecrementCount();
});
counter.Wait();
ASSERT_TRUE(absl::c_all_of(tasks, [](int64_t task) { return task == 1; }));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/concurrency.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/concurrency_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cc7acb88-4403-4bed-a00b-7e0ce9b987e8 | cpp | tensorflow/tensorflow | ffi | third_party/xla/xla/ffi/api/ffi.h | third_party/xla/xla/ffi/api/ffi_test.cc | #ifndef XLA_FFI_API_FFI_H_
#define XLA_FFI_API_FFI_H_
#ifdef XLA_FFI_FFI_H_
#error Two different XLA FFI implementations cannot be included together. \
See README.md for more details.
#endif
#include <algorithm>
#include <atomic>
#include <cassert>
#include <complex>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <ostream>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "xla/ffi/api/c_api.h"
#include "xla/ffi/api/api.h"
namespace xla::ffi {
using TypeId = XLA_FFI_TypeId;
enum class DataType : uint8_t {
INVALID = XLA_FFI_DataType_INVALID,
PRED = XLA_FFI_DataType_PRED,
S8 = XLA_FFI_DataType_S8,
S16 = XLA_FFI_DataType_S16,
S32 = XLA_FFI_DataType_S32,
S64 = XLA_FFI_DataType_S64,
U8 = XLA_FFI_DataType_U8,
U16 = XLA_FFI_DataType_U16,
U32 = XLA_FFI_DataType_U32,
U64 = XLA_FFI_DataType_U64,
F16 = XLA_FFI_DataType_F16,
F32 = XLA_FFI_DataType_F32,
F64 = XLA_FFI_DataType_F64,
BF16 = XLA_FFI_DataType_BF16,
C64 = XLA_FFI_DataType_C64,
C128 = XLA_FFI_DataType_C128,
TOKEN = XLA_FFI_DataType_TOKEN,
F8E5M2 = XLA_FFI_DataType_F8E5M2,
F8E4M3 = XLA_FFI_DataType_F8E4M3,
F8E4M3FN = XLA_FFI_DataType_F8E4M3FN,
F8E4M3B11FNUZ = XLA_FFI_DataType_F8E4M3B11FNUZ,
F8E5M2FNUZ = XLA_FFI_DataType_F8E5M2FNUZ,
F8E4M3FNUZ = XLA_FFI_DataType_F8E4M3FNUZ,
F8E3M4 = XLA_FFI_DataType_F8E3M4,
};
inline constexpr DataType PRED = DataType::PRED;
inline constexpr DataType S8 = DataType::S8;
inline constexpr DataType S16 = DataType::S16;
inline constexpr DataType S32 = DataType::S32;
inline constexpr DataType S64 = DataType::S64;
inline constexpr DataType U8 = DataType::U8;
inline constexpr DataType U16 = DataType::U16;
inline constexpr DataType U32 = DataType::U32;
inline constexpr DataType U64 = DataType::U64;
inline constexpr DataType F16 = DataType::F16;
inline constexpr DataType F32 = DataType::F32;
inline constexpr DataType F64 = DataType::F64;
inline constexpr DataType BF16 = DataType::BF16;
inline constexpr DataType C64 = DataType::C64;
inline constexpr DataType C128 = DataType::C128;
inline constexpr DataType TOKEN = DataType::TOKEN;
inline constexpr DataType F8E5M2 = DataType::F8E5M2;
inline constexpr DataType F8E4M3 = DataType::F8E4M3;
inline constexpr DataType F8E4M3FN = DataType::F8E4M3FN;
inline constexpr DataType F8E4M3B11FNUZ = DataType::F8E4M3B11FNUZ;
inline constexpr DataType F8E5M2FNUZ = DataType::F8E5M2FNUZ;
inline constexpr DataType F8E4M3FNUZ = DataType::F8E4M3FNUZ;
inline constexpr DataType F8E3M4 = DataType::F8E3M4;
inline std::ostream& operator<<(std::ostream& os, const DataType dtype) {
return os << static_cast<XLA_FFI_DataType>(dtype);
}
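// Size in bytes of a single element of `dtype`; TOKEN and INVALID have no
// storage and report 0.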
constexpr size_t ByteWidth(DataType dtype) {
switch (dtype) {
case DataType::INVALID:
case DataType::TOKEN:
return 0;
case DataType::PRED:
return 1;
case DataType::S8:
case DataType::U8:
case DataType::F8E5M2:
case DataType::F8E4M3:
case DataType::F8E4M3FN:
case DataType::F8E4M3B11FNUZ:
case DataType::F8E5M2FNUZ:
case DataType::F8E4M3FNUZ:
case DataType::F8E3M4:
return 1;
case DataType::S16:
case DataType::U16:
case DataType::F16:
case DataType::BF16:
return 2;
case DataType::S32:
case DataType::U32:
case DataType::F32:
return 4;
case DataType::S64:
case DataType::U64:
case DataType::F64:
return 8;
case DataType::C64:
return 8;
case DataType::C128:
return 16;
}
}
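// Non-owning view over a contiguous array; a minimal std::span-like type.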
template <typename T>
class Span {
public:
constexpr Span() : data_(nullptr), size_(0) {}
Span(T* data, size_t size) : data_(data), size_(size) {}
Span(const std::vector<std::remove_const_t<T>>& vec)
: Span(vec.data(), vec.size()) {}
T& operator[](size_t index) const { return data_[index]; }
bool operator==(const Span<T>& other) const {
return size() == other.size() && std::equal(begin(), end(), other.begin());
}
T& front() const { return data_[0]; }
T& back() const { return data_[size_ - 1]; }
Span<T> first(size_t n) const { return Span<T>(data_, n); }
Span<T> last(size_t n) const { return Span<T>(data_ + size_ - n, n); }
size_t size() const { return size_; }
T* begin() const { return data_; }
T* end() const { return data_ + size_; }
private:
T* data_;
size_t size_;
};
enum class ErrorCode : uint8_t {
kOk = XLA_FFI_Error_Code_OK,
kCancelled = XLA_FFI_Error_Code_CANCELLED,
kUnknown = XLA_FFI_Error_Code_UNKNOWN,
kInvalidArgument = XLA_FFI_Error_Code_INVALID_ARGUMENT,
kDeadlineExceeded = XLA_FFI_Error_Code_DEADLINE_EXCEEDED,
kNotFound = XLA_FFI_Error_Code_NOT_FOUND,
kAlreadyExists = XLA_FFI_Error_Code_ALREADY_EXISTS,
kPermissionDenied = XLA_FFI_Error_Code_PERMISSION_DENIED,
kResourceExhausted = XLA_FFI_Error_Code_RESOURCE_EXHAUSTED,
kFailedPrecondition = XLA_FFI_Error_Code_FAILED_PRECONDITION,
kAborted = XLA_FFI_Error_Code_ABORTED,
kOutOfRange = XLA_FFI_Error_Code_OUT_OF_RANGE,
kUnimplemented = XLA_FFI_Error_Code_UNIMPLEMENTED,
kInternal = XLA_FFI_Error_Code_INTERNAL,
kUnavailable = XLA_FFI_Error_Code_UNAVAILABLE,
kDataLoss = XLA_FFI_Error_Code_DATA_LOSS,
kUnauthenticated = XLA_FFI_Error_Code_UNAUTHENTICATED,
};
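// Error status carried by FFI handlers: an ErrorCode plus a message, where
// ErrorCode::kOk (the default) means success.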
class Error {
public:
Error() = default;
Error(ErrorCode errc, std::string message)
: errc_(errc), message_(std::move(message)) {}
Error(XLA_FFI_Error_Code errc, std::string message)
: Error(static_cast<ErrorCode>(errc), std::move(message)) {}
bool success() const { return errc_ == ErrorCode::kOk; }
bool failure() const { return !success(); }
std::optional<ErrorCode> errc() const { return errc_; }
const std::string& message() const { return message_; }
static Error Success() { return Error(); }
static Error Internal(std::string message) {
return Error(ErrorCode::kInternal, std::move(message));
}
static Error InvalidArgument(std::string message) {
return Error(ErrorCode::kInvalidArgument, std::move(message));
}
private:
ErrorCode errc_ = ErrorCode::kOk;
std::string message_;
};
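// Minimal std::expected-style pair of types: Expected<T, E> holds either a
// value or an error, and Unexpected<E> is the tag used to construct the error
// alternative.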
template <typename E>
class Unexpected;
template <typename T, typename E>
class Expected {
public:
constexpr Expected(T value) : data_(std::move(value)) {}
constexpr Expected(Unexpected<E> u);
constexpr operator bool() const {
return has_value();
}
constexpr T& operator*() & { return value(); }
constexpr const T& operator*() const& { return value(); }
constexpr T&& operator*() && { return std::move(value()); }
constexpr const T& operator*() const&& { return std::move(value()); }
constexpr T* operator->() { return &value(); }
constexpr const T* operator->() const { return &value(); }
constexpr bool has_value() const { return std::holds_alternative<T>(data_); }
constexpr bool has_error() const { return std::holds_alternative<E>(data_); }
constexpr T& value() & { return std::get<T>(data_); }
constexpr const T& value() const& { return std::get<T>(data_); }
constexpr T&& value() && { return std::get<T>(std::move(data_)); }
constexpr const T& value() const&& { return std::get<T>(std::move(data_)); }
constexpr E& error() & { return std::get<E>(data_); }
constexpr const E& error() const& { return std::get<E>(data_); }
constexpr E&& error() && { return std::get<E>(std::move(data_)); }
constexpr const E&& error() const&& { return std::get<E>(std::move(data_)); }
private:
std::variant<T, E> data_;
};
template <typename E>
class Unexpected {
public:
constexpr Unexpected(E error) : error_(std::move(error)) {}
private:
template <typename, typename>
friend class Expected;
E error_;
};
Unexpected(const char*) -> Unexpected<std::string>;
template <typename T, typename E>
constexpr Expected<T, E>::Expected(Unexpected<E> u)
: data_(std::move(u.error_)) {}
template <typename T>
class ErrorOr : public Expected<T, Error> {
public:
using Expected<T, Error>::Expected;
};
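// One-shot future/promise pair used to signal asynchronous completion of an
// FFI handler; a Promise can produce at most one Future, and OnReady callbacks
// run either immediately (if already completed) or when the promise is set.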
class Promise;
class Future {
public:
explicit Future(const Promise& promise);
Future(Future&&) = default;
Future& operator=(Future&&) = default;
template <typename F>
void OnReady(F&& f);
private:
friend class Promise;
using Waiter = std::function<void(const std::optional<Error>& error)>;
enum class State : uint8_t { kPending, kAvailable, kError };
struct WaiterAndState {
static_assert(alignof(std::max_align_t) >= 8 && sizeof(Waiter*) == 8);
static constexpr uint64_t kStateMask = (1ull << 2) - 1;
static constexpr uint64_t kPointerMask = ~kStateMask;
WaiterAndState(Waiter* ptr, State state) {
value = (reinterpret_cast<uintptr_t>(ptr) & kPointerMask) |
(static_cast<uintptr_t>(state) & kStateMask);
}
WaiterAndState() : WaiterAndState(nullptr, State::kPending) {}
State state() const { return static_cast<State>(value & kStateMask); }
Waiter* waiter() const {
return reinterpret_cast<Waiter*>(value & kPointerMask);
}
uintptr_t value;
};
static_assert(std::atomic<WaiterAndState>::is_always_lock_free,
"WaiterAndState atomic must be lock-free");
struct Data {
std::atomic<WaiterAndState> waiter_and_state = WaiterAndState();
std::optional<Error> error;
};
std::shared_ptr<Data> data_;
};
class Promise {
public:
Promise() : data_(std::make_shared<Future::Data>()) {}
Promise(Promise&&) = default;
Promise& operator=(Promise&&) = default;
void SetAvailable();
void SetError(Error error);
private:
friend class Future;
void SetCompleted(Future::State state);
std::shared_ptr<Future::Data> data_;
};
inline Future::Future(const Promise& promise) : data_(promise.data_) {
assert(data_.use_count() == 2 &&
"Promise can be used to create at most one Future");
}
template <typename F>
void Future::OnReady(F&& f) {
static_assert(std::is_invocable_v<F, const std::optional<Error>&>,
"F must be compatible with Waiter signature");
WaiterAndState old_value =
data_->waiter_and_state.load(std::memory_order_acquire);
if (old_value.state() != State::kPending) {
f(data_->error);
return;
}
auto* waiter = new Waiter(std::forward<F>(f));
auto new_value = WaiterAndState(waiter, State::kPending);
while (!data_->waiter_and_state.compare_exchange_weak(
old_value, new_value, std::memory_order_acq_rel,
std::memory_order_acquire)) {
if (old_value.state() != State::kPending) {
assert(old_value.waiter() == nullptr);
(*waiter)(data_->error);
delete waiter;
return;
}
}
assert(old_value.state() == State::kPending);
}
inline void Promise::SetAvailable() { SetCompleted(Future::State::kAvailable); }
inline void Promise::SetError(Error error) {
assert(error.errc() != ErrorCode::kOk);
assert(data_->error == std::nullopt);
data_->error = std::move(error);
SetCompleted(Future::State::kError);
}
inline void Promise::SetCompleted(Future::State state) {
Future::WaiterAndState old_value = data_->waiter_and_state.exchange(
{nullptr, state}, std::memory_order_acq_rel);
assert(old_value.state() == Future::State::kPending);
if (Future::Waiter* waiter = old_value.waiter()) {
(*waiter)(data_->error);
delete waiter;
}
}
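// Type-erased view of a buffer argument or result: exposes the element type,
// dimensions, byte size and raw data pointer without committing to a static
// dtype or rank.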
class AnyBuffer {
public:
using Dimensions = Span<const int64_t>;
explicit AnyBuffer(const XLA_FFI_Buffer* buf) : buf_(buf) {
assert(buf != nullptr && "XLA_FFI_Buffer must be non-null");
}
DataType element_type() const { return DataType(buf_->dtype); }
Dimensions dimensions() const { return Dimensions(buf_->dims, buf_->rank); }
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE size_t size_bytes() const {
return ByteWidth(element_type()) * element_count();
}
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE size_t element_count() const {
Dimensions dims = dimensions();
return std::accumulate(dims.begin(), dims.end(), int64_t{1},
std::multiplies<>());
}
void* untyped_data() const { return buf_->data; }
private:
const XLA_FFI_Buffer* buf_;
};
namespace internal {
template <DataType dtype>
struct always_false : std::false_type {};
template <DataType dtype>
struct DataTypeToNative {
static_assert(always_false<dtype>::value, "unsupported data type");
};
#define XLA_FFI_REGISTER_DATATYPE_MAPPING(data_type_value, actual_type) \
template <> \
struct DataTypeToNative<data_type_value> { \
using type = actual_type; \
};
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::PRED, bool);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::U8, uint8_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::U16, uint16_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::U32, uint32_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::U64, uint64_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::S8, int8_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::S16, int16_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::S32, int32_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::S64, int64_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::F16, uint16_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::F32, float);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::F64, double);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::BF16, uint16_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::C64, std::complex<float>);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::C128, std::complex<double>);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::TOKEN, void);
#undef XLA_FFI_REGISTER_DATATYPE_MAPPING
inline constexpr size_t kDynamicRank = std::numeric_limits<size_t>::max();
}
constexpr DataType ToComplex(DataType dtype) {
switch (dtype) {
case DataType::F32:
return DataType::C64;
case DataType::F64:
return DataType::C128;
default:
return DataType::INVALID;
}
}
constexpr DataType ToReal(DataType dtype) {
switch (dtype) {
case DataType::C64:
return DataType::F32;
case DataType::C128:
return DataType::F64;
default:
return dtype;
}
}
constexpr DataType ToImag(DataType dtype) {
switch (dtype) {
case DataType::C64:
return DataType::F32;
case DataType::C128:
return DataType::F64;
default:
return dtype;
}
}
template <DataType dtype>
using NativeType = typename internal::DataTypeToNative<dtype>::type;
template <DataType dtype>
constexpr bool IsComplexType() {
return std::is_same_v<NativeType<dtype>,
std::complex<NativeType<ToReal(dtype)>>>;
}
static_assert(ToReal(DataType::C64) == DataType::F32);
static_assert(ToReal(DataType::C128) == DataType::F64);
static_assert(ToReal(DataType::F32) == DataType::F32);
static_assert(ToComplex(DataType::F32) == DataType::C64);
static_assert(ToComplex(DataType::F64) == DataType::C128);
static_assert(ToComplex(DataType::S32) == DataType::INVALID);
static_assert(ToComplex(ToReal(DataType::C64)) == DataType::C64);
static_assert(ToComplex(ToImag(DataType::C128)) == DataType::C128);
static_assert(IsComplexType<DataType::C64>());
static_assert(IsComplexType<DataType::C128>());
static_assert(!IsComplexType<DataType::F32>());
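// Statically typed buffer view: the element type is fixed at compile time and
// the rank is either fixed or left dynamic (internal::kDynamicRank).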
template <DataType dtype, size_t rank = internal::kDynamicRank>
class Buffer {
public:
using Dimensions = AnyBuffer::Dimensions;
explicit Buffer(const XLA_FFI_Buffer* buf) : buf_(buf) {
assert(buf_ != nullptr && "XLA_FFI_Buffer must be non-null");
}
DataType element_type() const { return dtype; }
Dimensions dimensions() const {
return Dimensions(buf_->dims,
rank == internal::kDynamicRank ? buf_->rank : rank);
}
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE size_t size_bytes() const {
return ByteWidth(dtype) * element_count();
}
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE size_t element_count() const {
Dimensions dims = dimensions();
return std::accumulate(dims.begin(), dims.end(), int64_t{1},
std::multiplies<>());
}
void* untyped_data() const { return buf_->data; }
NativeType<dtype>* typed_data() const {
return reinterpret_cast<NativeType<dtype>*>(untyped_data());
}
private:
const XLA_FFI_Buffer* buf_;
};
template <DataType dtype> using BufferR0 = Buffer<dtype, 0>;
template <DataType dtype> using BufferR1 = Buffer<dtype, 1>;
template <DataType dtype> using BufferR2 = Buffer<dtype, 2>;
template <DataType dtype> using BufferR3 = Buffer<dtype, 3>;
template <DataType dtype> using BufferR4 = Buffer<dtype, 4>;
using Token = BufferR0<DataType::TOKEN>;
namespace internal {
template <DataType dtype, size_t rank>
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE std::optional<Buffer<dtype, rank>> DecodeBuffer(
XLA_FFI_Buffer* buf, DiagnosticEngine& diagnostic) {
if (auto buf_dtype = static_cast<DataType>(buf->dtype);
XLA_FFI_PREDICT_FALSE(buf_dtype != dtype)) {
return diagnostic.Emit("Wrong buffer dtype: expected ")
<< dtype << " but got " << buf_dtype;
}
if constexpr (rank != internal::kDynamicRank) {
if (XLA_FFI_PREDICT_FALSE(buf->rank != rank)) {
return diagnostic.Emit("Wrong buffer rank: expected ")
<< rank << " but got " << buf->rank;
}
}
return Buffer<dtype, rank>(buf);
}
}
template <DataType dtype, size_t rank = internal::kDynamicRank>
using ResultBuffer = Result<Buffer<dtype, rank>>;
template <DataType dtype> using ResultBufferR0 = ResultBuffer<dtype, 0>;
template <DataType dtype> using ResultBufferR1 = ResultBuffer<dtype, 1>;
template <DataType dtype> using ResultBufferR2 = ResultBuffer<dtype, 2>;
template <DataType dtype> using ResultBufferR3 = ResultBuffer<dtype, 3>;
template <DataType dtype> using ResultBufferR4 = ResultBuffer<dtype, 4>;
template <>
struct ArgBinding<AnyBuffer> {
using Arg = AnyBuffer;
};
template <DataType dtype, size_t rank>
struct ArgBinding<Buffer<dtype, rank>> {
using Arg = Buffer<dtype, rank>;
};
template <>
struct RetBinding<Result<AnyBuffer>> {
using Ret = AnyBuffer;
};
template <DataType dtype, size_t rank>
struct RetBinding<Result<Buffer<dtype, rank>>> {
using Ret = Buffer<dtype, rank>;
};
inline std::ostream& operator<<(std::ostream& os, const XLA_FFI_ArgType type) {
switch (type) {
case XLA_FFI_ArgType_BUFFER:
return os << "buffer";
}
}
template <>
struct ArgDecoding<AnyBuffer> {
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE
static std::optional<AnyBuffer> Decode(XLA_FFI_ArgType type, void* arg,
DiagnosticEngine& diagnostic) {
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_ArgType_BUFFER)) {
return diagnostic.Emit("Wrong argument type: expected ")
<< XLA_FFI_ArgType_BUFFER << " but got " << type;
}
return AnyBuffer(reinterpret_cast<XLA_FFI_Buffer*>(arg));
}
};
template <DataType dtype, size_t rank>
struct ArgDecoding<Buffer<dtype, rank>> {
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE
static std::optional<Buffer<dtype, rank>> Decode(
XLA_FFI_ArgType type, void* arg, DiagnosticEngine& diagnostic) {
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_ArgType_BUFFER)) {
return diagnostic.Emit("Wrong argument type: expected ")
<< XLA_FFI_ArgType_BUFFER << " but got " << type;
}
return internal::DecodeBuffer<dtype, rank>(
reinterpret_cast<XLA_FFI_Buffer*>(arg), diagnostic);
}
};
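// Variadic tail of call-frame arguments left after the explicitly bound ones;
// get<T>(i) decodes the i-th remaining argument on demand.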
class RemainingArgs : public internal::RemainingArgsBase {
public:
using internal::RemainingArgsBase::RemainingArgsBase;
template <typename T>
ErrorOr<T> get(size_t index) const {
size_t idx = offset() + index;
if (XLA_FFI_PREDICT_FALSE(idx >= args()->size)) {
return Unexpected(
Error(ErrorCode::kInvalidArgument, "Index out of range"));
}
DiagnosticEngine diagnostic;
std::optional<T> value = ArgDecoding<T>::Decode(
args()->types[idx], args()->args[idx], diagnostic);
if (XLA_FFI_PREDICT_FALSE(!value.has_value())) {
return Unexpected(Error::Internal(diagnostic.Result()));
}
return *value;
}
};
template <>
struct internal::Decode<internal::RemainingArgsTag> {
static std::optional<RemainingArgs> call(DecodingOffsets& offsets,
DecodingContext& ctx,
DiagnosticEngine& diagnostic) {
return RemainingArgs(&ctx.call_frame->args, offsets.args);
}
};
inline std::ostream& operator<<(std::ostream& os, const XLA_FFI_RetType type) {
switch (type) {
case XLA_FFI_RetType_BUFFER:
return os << "buffer";
}
}
template <>
struct RetDecoding<AnyBuffer> {
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE
static std::optional<Result<AnyBuffer>> Decode(XLA_FFI_RetType type,
void* ret,
DiagnosticEngine& diagnostic) {
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_RetType_BUFFER)) {
return diagnostic.Emit("Wrong result type: expected ")
<< XLA_FFI_RetType_BUFFER << " but got " << type;
}
return AnyBuffer(reinterpret_cast<XLA_FFI_Buffer*>(ret));
}
};
template <DataType dtype, size_t rank>
struct RetDecoding<Buffer<dtype, rank>> {
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE
static std::optional<Result<Buffer<dtype, rank>>> Decode(
XLA_FFI_RetType type, void* ret, DiagnosticEngine& diagnostic) {
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_RetType_BUFFER)) {
return diagnostic.Emit("Wrong result type: expected ")
<< XLA_FFI_RetType_BUFFER << " but got " << type;
}
return internal::DecodeBuffer<dtype, rank>(
reinterpret_cast<XLA_FFI_Buffer*>(ret), diagnostic);
}
};
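// Variadic tail of call-frame results left after the explicitly bound ones;
// get<T>(i) decodes the i-th remaining result on demand.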
class RemainingRets : public internal::RemainingRetsBase {
public:
using internal::RemainingRetsBase::RemainingRetsBase;
template <typename T>
ErrorOr<Result<T>> get(size_t index) const {
size_t idx = offset() + index;
if (XLA_FFI_PREDICT_FALSE(idx >= rets()->size)) {
return Unexpected(
Error(ErrorCode::kInvalidArgument, "Index out of range"));
}
DiagnosticEngine diagnostic;
std::optional<Result<T>> value = RetDecoding<T>::Decode(
rets()->types[idx], rets()->rets[idx], diagnostic);
if (XLA_FFI_PREDICT_FALSE(!value.has_value())) {
return Unexpected(Error::Internal(diagnostic.Result()));
}
return *value;
}
};
template <>
struct internal::Decode<internal::RemainingRetsTag> {
static std::optional<RemainingRets> call(DecodingOffsets& offsets,
DecodingContext& ctx,
DiagnosticEngine& diagnostic) {
return RemainingRets(&ctx.call_frame->rets, offsets.rets);
}
};
#define XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(T, TYPE) \
template <> \
struct AttrDecoding<Span<const T>> { \
using Type = Span<const T>; \
static std::optional<Type> Decode(XLA_FFI_AttrType type, void* attr, \
DiagnosticEngine& diagnostic) { \
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_AttrType_ARRAY)) { \
return diagnostic.Emit("Wrong attribute type: expected ") \
<< XLA_FFI_AttrType_ARRAY << " but got " << type; \
} \
\
auto* array = reinterpret_cast<XLA_FFI_Array*>(attr); \
if (XLA_FFI_PREDICT_FALSE(array->dtype != TYPE)) { \
return diagnostic.Emit("Wrong array data type: expected ") \
<< TYPE << " but got " << array->dtype; \
} \
\
return Span<const T>(reinterpret_cast<T*>(array->data), array->size); \
} \
}
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(int8_t, XLA_FFI_DataType_S8);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(int16_t, XLA_FFI_DataType_S16);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(int32_t, XLA_FFI_DataType_S32);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(int64_t, XLA_FFI_DataType_S64);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(uint8_t, XLA_FFI_DataType_U8);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(uint16_t, XLA_FFI_DataType_U16);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(uint32_t, XLA_FFI_DataType_U32);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(uint64_t, XLA_FFI_DataType_U64);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(float, XLA_FFI_DataType_F32);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(double, XLA_FFI_DataType_F64);
#undef XLA_FFI_REGISTER_ARRAY_ATTR_DECODING
template <typename T>
struct Pointer {};
template <typename T>
struct AttrDecoding<Pointer<T>> {
using Type = T*;
static std::optional<Type> Decode(XLA_FFI_AttrType type, void* attr,
DiagnosticEngine& diagnostic) {
auto* scalar = reinterpret_cast<XLA_FFI_Scalar*>(attr);
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_AttrType_SCALAR ||
scalar->dtype != XLA_FFI_DataType_S64)) {
return diagnostic.Emit("Wrong attribute type: ")
<< "expected i64 scalar for passing pointer but got " << type;
}
static_assert(sizeof(uintptr_t) == sizeof(int64_t));
uintptr_t ptr = *reinterpret_cast<uintptr_t*>(scalar->value);
return reinterpret_cast<Type>(ptr);
}
};
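// View of a dictionary attribute: get<T>(name) looks up and decodes a single
// entry, returning an error if it is missing or fails to decode as T.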
class Dictionary : public internal::DictionaryBase {
public:
using internal::DictionaryBase::DictionaryBase;
template <typename T>
ErrorOr<T> get(std::string_view name) const {
DiagnosticEngine diagnostic;
std::optional<T> value = internal::DictionaryBase::get<T>(name, diagnostic);
if (!value.has_value()) {
return Unexpected(Error::Internal(diagnostic.Result()));
}
return *value;
}
};
template <>
struct internal::Decode<internal::AttrsTag<Dictionary>> {
static std::optional<Dictionary> call(DecodingOffsets& offsets,
DecodingContext& ctx,
DiagnosticEngine& diagnostic) {
return Dictionary(&ctx.call_frame->attrs);
}
};
template <>
struct AttrDecoding<Dictionary> {
using Type = Dictionary;
static std::optional<Dictionary> Decode(XLA_FFI_AttrType type, void* attr,
DiagnosticEngine& diagnostic) {
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_AttrType_DICTIONARY)) {
return diagnostic.Emit("Wrong attribute type: expected ")
<< XLA_FFI_AttrType_DICTIONARY << " but got " << type;
}
return Dictionary(reinterpret_cast<XLA_FFI_Attrs*>(attr));
}
};
namespace internal {
inline XLA_FFI_Error* CreateError(const XLA_FFI_Api* api, const Error& error) {
XLA_FFI_Error_Create_Args args;
args.struct_size = XLA_FFI_Error_Create_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.errc = static_cast<XLA_FFI_Error_Code>(*error.errc());
args.message = error.message().c_str();
return api->XLA_FFI_Error_Create(&args);
}
inline void DestroyError(const XLA_FFI_Api* api, XLA_FFI_Error* error) {
XLA_FFI_Error_Destroy_Args args;
args.struct_size = XLA_FFI_Error_Destroy_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.error = error;
api->XLA_FFI_Error_Destroy(&args);
}
inline const char* GetErrorMessage(const XLA_FFI_Api* api,
XLA_FFI_Error* error) {
XLA_FFI_Error_GetMessage_Args args;
args.struct_size = XLA_FFI_Error_GetMessage_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.error = error;
api->XLA_FFI_Error_GetMessage(&args);
return args.message;
}
}
template <ExecutionStage stage>
struct ResultEncoding<stage, Error> {
static XLA_FFI_Error* Encode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx, Error error) {
if (XLA_FFI_PREDICT_TRUE(error.success())) {
return nullptr;
}
return internal::CreateError(api, error);
}
};
template <typename T>
struct ResultEncoding<ExecutionStage::kInstantiate,
ErrorOr<std::unique_ptr<T>>> {
static_assert(std::is_same_v<decltype(T::id), TypeId>,
"State type must have a static `TypeId id` field");
static XLA_FFI_Error* Encode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
ErrorOr<std::unique_ptr<T>> state) {
if (XLA_FFI_PREDICT_TRUE(state.has_value())) {
XLA_FFI_State_Set_Args args;
args.struct_size = XLA_FFI_State_Set_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx;
args.type_id = &T::id;
args.state = state.value().release();
args.deleter = +[](void* state) { delete reinterpret_cast<T*>(state); };
return api->XLA_FFI_State_Set(&args);
}
return internal::CreateError(api, state.error());
}
};
template <ExecutionStage stage>
struct ResultEncoding<stage, Future> {
static std::variant<XLA_FFI_Error*, XLA_FFI_Future*> Encode(
const XLA_FFI_Api* api, XLA_FFI_ExecutionContext* ctx, Future future) {
XLA_FFI_Future_Create_Args args;
args.struct_size = XLA_FFI_Future_Create_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.future = nullptr;
if (auto* err = api->XLA_FFI_Future_Create(&args)) {
return err;
}
assert(args.future != nullptr && "XLA_FFI_Future_Create failed");
future.OnReady([api, f = args.future](const std::optional<Error>& error) {
auto abort_on_error = [api](XLA_FFI_Error* err) {
if (XLA_FFI_PREDICT_TRUE(err == nullptr)) {
return;
}
std::cerr << "Failed to signal XLA_FFI_Future completion: "
<< internal::GetErrorMessage(api, err) << std::endl;
internal::DestroyError(api, err);
std::abort();
};
if (XLA_FFI_PREDICT_FALSE(error.has_value())) {
XLA_FFI_Future_SetError_Args args;
args.struct_size = XLA_FFI_Future_SetError_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.future = f;
args.error = internal::CreateError(api, *error);
abort_on_error(api->XLA_FFI_Future_SetError(&args));
} else {
XLA_FFI_Future_SetAvailable_Args args;
args.struct_size = XLA_FFI_Future_SetAvailable_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.future = f;
abort_on_error(api->XLA_FFI_Future_SetAvailable(&args));
}
});
return args.future;
}
};
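// Decodes the backend compute stream from the execution context; T is the
// platform-specific stream handle type and must be a pointer.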
template <typename T>
struct PlatformStream {};
template <typename T>
struct CtxDecoding<PlatformStream<T>> {
using Type = T;
static_assert(std::is_pointer_v<T>, "stream type must be a pointer");
static std::optional<Type> Decode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic) {
XLA_FFI_Stream_Get_Args args;
args.struct_size = XLA_FFI_Stream_Get_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx;
args.stream = nullptr;
if (XLA_FFI_Error* error = api->XLA_FFI_Stream_Get(&args)) {
diagnostic.Emit("Failed to get platform stream: ")
<< internal::GetErrorMessage(api, error);
internal::DestroyError(api, error);
return std::nullopt;
}
return reinterpret_cast<T>(args.stream);
}
};
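// Allocates temporary device memory through the runtime; all allocations are
// released when the ScratchAllocator is destroyed.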
class ScratchAllocator {
public:
~ScratchAllocator();
ScratchAllocator(ScratchAllocator&&) = default;
ScratchAllocator& operator=(ScratchAllocator&&) = default;
std::optional<void*> Allocate(size_t size, size_t alignment = 1);
private:
friend struct CtxDecoding<ScratchAllocator>;
ScratchAllocator(const XLA_FFI_Api* api, XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic);
struct Allocation {
size_t size;
void* data;
};
const XLA_FFI_Api* api_;
XLA_FFI_ExecutionContext* ctx_;
DiagnosticEngine& diagnostic_;
std::vector<Allocation> allocations_;
};
template <>
struct CtxDecoding<ScratchAllocator> {
using Type = ScratchAllocator;
static std::optional<Type> Decode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic) {
return ScratchAllocator(api, ctx, diagnostic);
}
};
inline std::optional<void*> ScratchAllocator::Allocate(size_t size,
size_t alignment) {
XLA_FFI_DeviceMemory_Allocate_Args args;
args.struct_size = XLA_FFI_DeviceMemory_Allocate_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx_;
args.size = size;
args.alignment = alignment;
args.data = nullptr;
if (XLA_FFI_Error* error = api_->XLA_FFI_DeviceMemory_Allocate(&args)) {
diagnostic_.Emit("Failed to allocate scratch memory: ")
<< internal::GetErrorMessage(api_, error);
internal::DestroyError(api_, error);
return std::nullopt;
}
allocations_.push_back({size, args.data});
return args.data;
}
inline ScratchAllocator::ScratchAllocator(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic)
: api_(api), ctx_(ctx), diagnostic_(diagnostic) {}
inline ScratchAllocator::~ScratchAllocator() {
for (Allocation& alloc : allocations_) {
XLA_FFI_DeviceMemory_Free_Args args;
args.struct_size = XLA_FFI_DeviceMemory_Free_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx_;
args.size = alloc.size;
args.data = alloc.data;
if (XLA_FFI_Error* error = api_->XLA_FFI_DeviceMemory_Free(&args)) {
diagnostic_.Emit("Failed to free scratch memory: ")
<< internal::GetErrorMessage(api_, error);
internal::DestroyError(api_, error);
}
}
}
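// Schedules tasks on the runtime-provided thread pool; if scheduling fails,
// the task is executed inline on the calling thread.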
class ThreadPool {
public:
template <typename F>
void Schedule(F&& f) {
XLA_FFI_Task* task = +[](void* data) {
auto* f = reinterpret_cast<F*>(data);
(*f)();
delete f;
};
F* data = new F(std::forward<F>(f));
XLA_FFI_ThreadPool_Schedule_Args args;
args.struct_size = XLA_FFI_ThreadPool_Schedule_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx_;
args.task = task;
args.data = data;
if (XLA_FFI_Error* error = api_->XLA_FFI_ThreadPool_Schedule(&args)) {
diagnostic_.Emit("Failed to schedule task on a thread pool: ")
<< internal::GetErrorMessage(api_, error);
internal::DestroyError(api_, error);
task(data);
}
}
private:
friend struct CtxDecoding<ThreadPool>;
ThreadPool(const XLA_FFI_Api* api, XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic);
const XLA_FFI_Api* api_;
XLA_FFI_ExecutionContext* ctx_;
DiagnosticEngine& diagnostic_;
};
template <>
struct CtxDecoding<ThreadPool> {
using Type = ThreadPool;
static std::optional<Type> Decode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic) {
return ThreadPool(api, ctx, diagnostic);
}
};
inline ThreadPool::ThreadPool(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic)
: api_(api), ctx_(ctx), diagnostic_(diagnostic) {}
namespace internal {
inline XLA_FFI_Error* RegisterType(const XLA_FFI_Api* api,
std::string_view name,
XLA_FFI_TypeId* type_id) {
XLA_FFI_TypeId_Register_Args args;
args.struct_size = XLA_FFI_TypeId_Register_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.name = XLA_FFI_ByteSpan{name.data(), name.size()};
args.type_id = type_id;
return api->XLA_FFI_TypeId_Register(&args);
}
}
#define XLA_FFI_REGISTER_TYPE(API, NAME, TYPE_ID) \
XLA_FFI_REGISTER_TYPE_(API, NAME, TYPE_ID, __COUNTER__)
#define XLA_FFI_REGISTER_TYPE_(API, NAME, TYPE_ID, N) \
XLA_FFI_REGISTER_TYPE__(API, NAME, TYPE_ID, N)
#define XLA_FFI_REGISTER_TYPE__(API, NAME, TYPE_ID, N) \
XLA_FFI_ATTRIBUTE_UNUSED static const XLA_FFI_Error* \
xla_ffi_type_##N##_registered_ = [] { \
return ::xla::ffi::internal::RegisterType(API, NAME, TYPE_ID); \
}()
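// UserData<T> decodes a pointer to user-provided data attached to the
// execution context; T must be registered via XLA_FFI_REGISTER_TYPE.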
template <typename T>
struct UserData {};
template <typename T>
struct CtxDecoding<UserData<T>> {
using Type = T*;
static_assert(std::is_same_v<decltype(T::id), TypeId>,
"UserData type must have a static `TypeId id` field");
static std::optional<Type> Decode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic) {
XLA_FFI_ExecutionContext_Get_Args args;
args.struct_size = XLA_FFI_ExecutionContext_Get_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx;
args.type_id = &T::id;
args.data = nullptr;
assert(args.type_id->type_id > 0 && "type must be registered with XLA FFI");
if (XLA_FFI_Error* err = api->XLA_FFI_ExecutionContext_Get(&args); err) {
diagnostic.Emit("Failed to get user data from execution context: ")
<< internal::GetErrorMessage(api, err);
internal::DestroyError(api, err);
return std::nullopt;
}
return static_cast<Type>(args.data);
}
};
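// State<T> decodes the per-handler state object created during the
// instantiation stage of a stateful handler.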
template <typename T>
struct State {};
template <typename T>
struct CtxDecoding<State<T>> {
using Type = T*;
static_assert(std::is_same_v<decltype(T::id), TypeId>,
"State type must have a static `TypeId id` field");
static std::optional<Type> Decode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic) {
XLA_FFI_State_Get_Args args;
args.struct_size = XLA_FFI_State_Get_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx;
args.type_id = &T::id;
args.state = nullptr;
assert(args.type_id->type_id > 0 && "type must be registered with XLA FFI");
if (XLA_FFI_Error* err = api->XLA_FFI_State_Get(&args); err) {
diagnostic.Emit("Failed to get state from execution context: ")
<< internal::GetErrorMessage(api, err);
internal::DestroyError(api, err);
return std::nullopt;
}
return static_cast<Type>(args.state);
}
};
}
#endif | #include "xla/ffi/api/ffi.h"
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/synchronization/blocking_counter.h"
#include "xla/ffi/api/c_api.h"
#include "xla/ffi/call_frame.h"
#include "xla/ffi/execution_context.h"
#include "xla/ffi/execution_state.h"
#include "xla/ffi/ffi_api.h"
#include "xla/ffi/type_id_registry.h"
#include "xla/primitive_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/concurrency/chain.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
#define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla::ffi {
enum class Int32BasedEnum : int32_t {
kOne = 1,
kTwo = 2,
};
static constexpr int64_t kI32MaxValue = std::numeric_limits<int32_t>::max();
enum class Int64BasedEnum : int64_t {
kOne = kI32MaxValue + 1,
kTwo = kI32MaxValue + 2,
};
}
XLA_FFI_REGISTER_ENUM_ATTR_DECODING(::xla::ffi::Int32BasedEnum);
XLA_FFI_REGISTER_ENUM_ATTR_DECODING(::xla::ffi::Int64BasedEnum);
namespace xla::ffi {
struct PairOfI32AndF32 {
int32_t i32;
float f32;
};
struct TupleOfI32 {
int32_t i32_0;
int32_t i32_1;
int32_t i32_2;
int32_t i32_3;
};
}
XLA_FFI_REGISTER_STRUCT_ATTR_DECODING(::xla::ffi::PairOfI32AndF32,
::xla::ffi::StructMember<int32_t>("i32"),
::xla::ffi::StructMember<float>("f32"));
XLA_FFI_REGISTER_STRUCT_ATTR_DECODING(
::xla::ffi::TupleOfI32, ::xla::ffi::StructMember<int32_t>("i32_0"),
::xla::ffi::StructMember<int32_t>("i32_1"),
::xla::ffi::StructMember<int32_t>("i32_2"),
::xla::ffi::StructMember<int32_t>("i32_3"));
namespace xla::ffi {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(FfiTest, DataTypeEnumValue) {
auto encoded = [](auto value) { return static_cast<uint8_t>(value); };
EXPECT_EQ(encoded(PrimitiveType::PRED), encoded(DataType::PRED));
EXPECT_EQ(encoded(PrimitiveType::S8), encoded(DataType::S8));
EXPECT_EQ(encoded(PrimitiveType::S16), encoded(DataType::S16));
EXPECT_EQ(encoded(PrimitiveType::S32), encoded(DataType::S32));
EXPECT_EQ(encoded(PrimitiveType::S64), encoded(DataType::S64));
EXPECT_EQ(encoded(PrimitiveType::U8), encoded(DataType::U8));
EXPECT_EQ(encoded(PrimitiveType::U16), encoded(DataType::U16));
EXPECT_EQ(encoded(PrimitiveType::U32), encoded(DataType::U32));
EXPECT_EQ(encoded(PrimitiveType::U64), encoded(DataType::U64));
EXPECT_EQ(encoded(PrimitiveType::F16), encoded(DataType::F16));
EXPECT_EQ(encoded(PrimitiveType::F32), encoded(DataType::F32));
EXPECT_EQ(encoded(PrimitiveType::F64), encoded(DataType::F64));
EXPECT_EQ(encoded(PrimitiveType::BF16), encoded(DataType::BF16));
EXPECT_EQ(encoded(PrimitiveType::C64), encoded(DataType::C64));
EXPECT_EQ(encoded(PrimitiveType::C128), encoded(DataType::C128));
EXPECT_EQ(encoded(PrimitiveType::TOKEN), encoded(DataType::TOKEN));
EXPECT_EQ(encoded(PrimitiveType::F8E5M2), encoded(DataType::F8E5M2));
EXPECT_EQ(encoded(PrimitiveType::F8E4M3), encoded(DataType::F8E4M3));
EXPECT_EQ(encoded(PrimitiveType::F8E4M3FN), encoded(DataType::F8E4M3FN));
EXPECT_EQ(encoded(PrimitiveType::F8E4M3B11FNUZ),
encoded(DataType::F8E4M3B11FNUZ));
EXPECT_EQ(encoded(PrimitiveType::F8E5M2FNUZ), encoded(DataType::F8E5M2FNUZ));
EXPECT_EQ(encoded(PrimitiveType::F8E4M3FNUZ), encoded(DataType::F8E4M3FNUZ));
EXPECT_EQ(encoded(PrimitiveType::F8E3M4), encoded(DataType::F8E3M4));
}
TEST(FfiTest, DataTypeByteWidth) {
EXPECT_EQ(0, ByteWidth(DataType::TOKEN));
EXPECT_EQ(0, ByteWidth(DataType::INVALID));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::PRED),
ByteWidth(DataType::PRED));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::S8),
ByteWidth(DataType::S8));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::S16),
ByteWidth(DataType::S16));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::S32),
ByteWidth(DataType::S32));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::S64),
ByteWidth(DataType::S64));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::U8),
ByteWidth(DataType::U8));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::U16),
ByteWidth(DataType::U16));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::U32),
ByteWidth(DataType::U32));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::U64),
ByteWidth(DataType::U64));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F16),
ByteWidth(DataType::F16));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F32),
ByteWidth(DataType::F32));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F64),
ByteWidth(DataType::F64));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::BF16),
ByteWidth(DataType::BF16));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::C64),
ByteWidth(DataType::C64));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::C128),
ByteWidth(DataType::C128));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E5M2),
ByteWidth(DataType::F8E5M2));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E4M3),
ByteWidth(DataType::F8E4M3));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E4M3FN),
ByteWidth(DataType::F8E4M3FN));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E4M3B11FNUZ),
ByteWidth(DataType::F8E4M3B11FNUZ));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E5M2FNUZ),
ByteWidth(DataType::F8E5M2FNUZ));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E4M3FNUZ),
ByteWidth(DataType::F8E4M3FNUZ));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E3M4),
ByteWidth(DataType::F8E3M4));
}
TEST(FfiTest, ErrorEnumValue) {
auto encoded = [](auto value) { return static_cast<uint8_t>(value); };
EXPECT_EQ(encoded(absl::StatusCode::kOk), encoded(ErrorCode::kOk));
EXPECT_EQ(encoded(absl::StatusCode::kCancelled),
encoded(ErrorCode::kCancelled));
EXPECT_EQ(encoded(absl::StatusCode::kUnknown), encoded(ErrorCode::kUnknown));
EXPECT_EQ(encoded(absl::StatusCode::kInvalidArgument),
encoded(ErrorCode::kInvalidArgument));
EXPECT_EQ(encoded(absl::StatusCode::kNotFound),
encoded(ErrorCode::kNotFound));
EXPECT_EQ(encoded(absl::StatusCode::kAlreadyExists),
encoded(ErrorCode::kAlreadyExists));
EXPECT_EQ(encoded(absl::StatusCode::kPermissionDenied),
encoded(ErrorCode::kPermissionDenied));
EXPECT_EQ(encoded(absl::StatusCode::kResourceExhausted),
encoded(ErrorCode::kResourceExhausted));
EXPECT_EQ(encoded(absl::StatusCode::kFailedPrecondition),
encoded(ErrorCode::kFailedPrecondition));
EXPECT_EQ(encoded(absl::StatusCode::kAborted), encoded(ErrorCode::kAborted));
EXPECT_EQ(encoded(absl::StatusCode::kOutOfRange),
encoded(ErrorCode::kOutOfRange));
EXPECT_EQ(encoded(absl::StatusCode::kUnimplemented),
encoded(ErrorCode::kUnimplemented));
EXPECT_EQ(encoded(absl::StatusCode::kInternal),
encoded(ErrorCode::kInternal));
EXPECT_EQ(encoded(absl::StatusCode::kUnavailable),
encoded(ErrorCode::kUnavailable));
EXPECT_EQ(encoded(absl::StatusCode::kDataLoss),
encoded(ErrorCode::kDataLoss));
EXPECT_EQ(encoded(absl::StatusCode::kUnauthenticated),
encoded(ErrorCode::kUnauthenticated));
}
TEST(FfiTest, Expected) {
ErrorOr<int32_t> value(42);
EXPECT_TRUE(value.has_value());
EXPECT_FALSE(value.has_error());
EXPECT_EQ(*value, 42);
ErrorOr<int32_t> error(Error(ErrorCode::kInternal, "Test error"));
EXPECT_FALSE(error.has_value());
EXPECT_TRUE(error.has_error());
EXPECT_THAT(error.error().message(), HasSubstr("Test error"));
}
TEST(FfiTest, FutureSetAvailable) {
Promise promise;
Future future(promise);
promise.SetAvailable();
future.OnReady([](const std::optional<Error>& error) {
EXPECT_FALSE(error.has_value());
});
}
TEST(FfiTest, FutureSetError) {
Promise promise;
Future future(promise);
promise.SetError(Error(ErrorCode::kInternal, "Test error"));
future.OnReady([](const std::optional<Error>& error) {
EXPECT_TRUE(error.has_value());
EXPECT_THAT(error->message(), HasSubstr("Test error"));
});
}
TEST(FfiTest, FutureSetAvailableFromThreadPool) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "ffi-test", 2);
Promise promise;
Future future(promise);
int32_t value = 0;
absl::BlockingCounter counter(1);
future.OnReady([&](const std::optional<Error>& error) {
EXPECT_FALSE(error.has_value());
EXPECT_EQ(value, 42);
counter.DecrementCount();
});
pool.Schedule([&]() {
value = 42;
promise.SetAvailable();
});
counter.Wait();
}
TEST(FfiTest, FutureSetErrorFromThreadPool) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "ffi-test", 2);
Promise promise;
Future future(promise);
int32_t value = 0;
absl::BlockingCounter counter(1);
future.OnReady([&](const std::optional<Error>& error) {
EXPECT_TRUE(error.has_value());
EXPECT_THAT(error->message(), HasSubstr("Test error"));
EXPECT_EQ(value, 42);
counter.DecrementCount();
});
pool.Schedule([&]() {
value = 42;
promise.SetError(Error(ErrorCode::kInternal, "Test error"));
});
counter.Wait();
}
TEST(FfiTest, FutureRace) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "ffi-test", 2);
for (int32_t i = 0; i < 1000; ++i) {
Promise promise;
Future future(promise);
absl::BlockingCounter counter(1);
pool.Schedule([&]() { promise.SetAvailable(); });
pool.Schedule([&]() {
future.OnReady([&](const std::optional<Error>& error) {
EXPECT_FALSE(error.has_value());
counter.DecrementCount();
});
});
counter.Wait();
}
}
TEST(FfiTest, ReturnError) {
CallFrameBuilder builder(0, 0);
auto call_frame = builder.Build();
auto handler = Ffi::Bind().To(
[]() { return Error(ErrorCode::kInternal, "Test error"); });
auto status = Call(*handler, call_frame);
EXPECT_EQ(status, absl::InternalError("Test error"));
}
TEST(FfiTest, AnyBufferArgument) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Arg<AnyBuffer>().To([&](auto buffer) {
EXPECT_EQ(buffer.untyped_data(), storage.data());
EXPECT_EQ(buffer.dimensions().size(), 2);
return Error::Success();
});
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, BufferArgument) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Arg<BufferR2<F32>>().To([&](auto buffer) {
EXPECT_EQ(buffer.typed_data(), storage.data());
EXPECT_EQ(buffer.dimensions().size(), 2);
return Error::Success();
});
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AnyBufferResult) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(0, 1);
builder.AddBufferRet(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Ret<AnyBuffer>().To([&](Result<AnyBuffer> buffer) {
EXPECT_EQ(buffer->untyped_data(), storage.data());
EXPECT_EQ(buffer->dimensions().size(), 2);
return Error::Success();
});
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, MissingBufferArgument) {
CallFrameBuilder builder(0, 0);
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Arg<BufferR1<F32>>().To(
[](auto) { return Error::Success(); });
auto status = Call(*handler, call_frame);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Wrong number of arguments")));
}
TEST(FfiTest, WrongRankBufferArgument) {
  std::vector<int32_t> storage(4, 0);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(int32_t));
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Arg<BufferR1<F32>>().To(
[](auto) { return Error::Success(); });
auto status = Call(*handler, call_frame);
EXPECT_THAT(status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Wrong buffer rank: expected 1 but got 2")));
}
TEST(FfiTest, WrongTypeBufferArgument) {
  std::vector<int32_t> storage(4, 0);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(int32_t));
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::S32, {2, 2});
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Arg<BufferR2<F32>>().To(
[](auto) { return Error::Success(); });
auto status = Call(*handler, call_frame);
EXPECT_THAT(
status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Wrong buffer dtype: expected F32 but got S32")));
}
TEST(FfiTest, TokenArgument) {
CallFrameBuilder builder(1, 0);
  builder.AddBufferArg(se::DeviceMemoryBase(), PrimitiveType::TOKEN, {});
auto call_frame = builder.Build();
auto fn = [&](Token tok) {
EXPECT_EQ(tok.typed_data(), nullptr);
EXPECT_EQ(tok.dimensions().size(), 0);
return Error::Success();
};
auto handler = Ffi::Bind().Arg<Token>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, RemainingArgs) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
auto fn = [&](RemainingArgs args) {
EXPECT_EQ(args.size(), 1);
ErrorOr<AnyBuffer> arg0 = args.get<AnyBuffer>(0);
ErrorOr<AnyBuffer> arg1 = args.get<AnyBuffer>(1);
EXPECT_TRUE(arg0.has_value());
EXPECT_FALSE(arg1.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().RemainingArgs().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, RemainingRets) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(0, 2);
builder.AddBufferRet(memory, PrimitiveType::F32, {2, 2});
builder.AddBufferRet(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
auto fn = [&](Result<AnyBuffer> ret, RemainingRets rets) {
EXPECT_EQ(rets.size(), 1);
ErrorOr<Result<AnyBuffer>> ret0 = rets.get<AnyBuffer>(0);
ErrorOr<Result<AnyBuffer>> ret1 = rets.get<AnyBuffer>(1);
EXPECT_TRUE(ret0.has_value());
EXPECT_FALSE(ret1.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().Ret<AnyBuffer>().RemainingRets().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, OptionalArgs) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
{
auto fn = [&](std::optional<AnyBuffer> arg0) {
EXPECT_TRUE(arg0.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().OptionalArg<AnyBuffer>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
{
auto fn = [&](std::optional<AnyBuffer> arg0,
std::optional<AnyBuffer> arg1) {
EXPECT_TRUE(arg0.has_value());
EXPECT_FALSE(arg1.has_value());
return Error::Success();
};
auto handler =
Ffi::Bind().OptionalArg<AnyBuffer>().OptionalArg<AnyBuffer>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
{
auto fn = [&](AnyBuffer arg0, std::optional<AnyBuffer> arg1) {
EXPECT_FALSE(arg1.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().Arg<AnyBuffer>().OptionalArg<AnyBuffer>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
{
auto fn = [&](std::optional<AnyBuffer> arg0, RemainingArgs args) {
EXPECT_TRUE(arg0.has_value());
EXPECT_EQ(args.size(), 0);
return Error::Success();
};
auto handler = Ffi::Bind().OptionalArg<AnyBuffer>().RemainingArgs().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
}
TEST(FfiTest, OptionalRets) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(0, 1);
builder.AddBufferRet(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
{
auto fn = [&](std::optional<Result<AnyBuffer>> ret0) {
EXPECT_TRUE(ret0.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().OptionalRet<AnyBuffer>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
{
auto fn = [&](std::optional<Result<AnyBuffer>> ret0,
std::optional<Result<AnyBuffer>> ret1) {
EXPECT_TRUE(ret0.has_value());
EXPECT_FALSE(ret1.has_value());
return Error::Success();
};
auto handler =
Ffi::Bind().OptionalRet<AnyBuffer>().OptionalRet<AnyBuffer>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
{
auto fn = [&](Result<AnyBuffer> ret0,
std::optional<Result<AnyBuffer>> ret1) {
EXPECT_FALSE(ret1.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().Ret<AnyBuffer>().OptionalRet<AnyBuffer>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
{
auto fn = [&](std::optional<Result<AnyBuffer>> ret0, RemainingRets rets) {
EXPECT_TRUE(ret0.has_value());
EXPECT_EQ(rets.size(), 0);
return Error::Success();
};
auto handler = Ffi::Bind().OptionalRet<AnyBuffer>().RemainingRets().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
}
TEST(FfiTest, AutoBinding) {
static constexpr char kI32[] = "i32";
auto handler = Ffi::BindTo(+[](AnyBuffer buffer, Attr<int32_t, kI32> foo) {
EXPECT_EQ(*foo, 42);
return Error::Success();
});
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert(kI32, 42);
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::F32, {2, 2});
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AutoBindingResult) {
auto handler =
Ffi::BindTo(+[](Result<AnyBuffer> buffer) { return Error::Success(); });
CallFrameBuilder builder(0, 1);
builder.AddBufferRet(se::DeviceMemoryBase(), PrimitiveType::F32, {});
auto call_frame = builder.Build();
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AutoBindingStructs) {
auto handler = Ffi::BindTo(+[](PairOfI32AndF32 attrs) {
EXPECT_EQ(attrs.i32, 42);
EXPECT_EQ(attrs.f32, 42.0f);
return Error::Success();
});
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32", 42);
attrs.Insert("f32", 42.0f);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AutoBindingDictionary) {
auto handler = Ffi::BindTo(+[](Dictionary attrs) {
EXPECT_EQ(*attrs.get<int32_t>("i32"), 42);
EXPECT_EQ(*attrs.get<float>("f32"), 42.0f);
return Error::Success();
});
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32", 42);
attrs.Insert("f32", 42.0f);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
struct TestStreamSt;
using TestStream = TestStreamSt*;
template <>
struct CtxBinding<TestStream> {
using Ctx = PlatformStream<TestStream>;
};
TEST(FfiTest, BindingPlatformStreamInference) {
(void)Ffi::BindTo(+[](TestStream stream) { return Error::Success(); });
}
TEST(FfiTest, ArrayAttr) {
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("arr0", std::vector<int8_t>({1, 2, 3, 4}));
attrs.Insert("arr1", std::vector<int16_t>({1, 2, 3, 4}));
attrs.Insert("arr2", std::vector<int32_t>({1, 2, 3, 4}));
attrs.Insert("arr3", std::vector<int64_t>({1, 2, 3, 4}));
attrs.Insert("arr4", std::vector<uint8_t>({1, 2, 3, 4}));
attrs.Insert("arr5", std::vector<uint16_t>({1, 2, 3, 4}));
attrs.Insert("arr6", std::vector<uint32_t>({1, 2, 3, 4}));
attrs.Insert("arr7", std::vector<uint64_t>({1, 2, 3, 4}));
attrs.Insert("arr8", std::vector<float>({1, 2, 3, 4}));
attrs.Insert("arr9", std::vector<double>({1, 2, 3, 4}));
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](auto arr0, auto arr1, auto arr2, auto arr3, auto arr4,
auto arr5, auto arr6, auto arr7, auto arr8, auto arr9) {
EXPECT_EQ(arr0, Span<const int8_t>({1, 2, 3, 4}));
EXPECT_EQ(arr1, Span<const int16_t>({1, 2, 3, 4}));
EXPECT_EQ(arr2, Span<const int32_t>({1, 2, 3, 4}));
EXPECT_EQ(arr3, Span<const int64_t>({1, 2, 3, 4}));
EXPECT_EQ(arr4, Span<const uint8_t>({1, 2, 3, 4}));
EXPECT_EQ(arr5, Span<const uint16_t>({1, 2, 3, 4}));
EXPECT_EQ(arr6, Span<const uint32_t>({1, 2, 3, 4}));
EXPECT_EQ(arr7, Span<const uint64_t>({1, 2, 3, 4}));
EXPECT_EQ(arr8, Span<const float>({1, 2, 3, 4}));
EXPECT_EQ(arr9, Span<const double>({1, 2, 3, 4}));
return Error::Success();
};
auto handler = Ffi::Bind()
.Attr<Span<const int8_t>>("arr0")
.Attr<Span<const int16_t>>("arr1")
.Attr<Span<const int32_t>>("arr2")
.Attr<Span<const int64_t>>("arr3")
.Attr<Span<const uint8_t>>("arr4")
.Attr<Span<const uint16_t>>("arr5")
.Attr<Span<const uint32_t>>("arr6")
.Attr<Span<const uint64_t>>("arr7")
.Attr<Span<const float>>("arr8")
.Attr<Span<const double>>("arr9")
.To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AttrsAsDictionary) {
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32", 42);
attrs.Insert("f32", 42.0f);
attrs.Insert("str", "foo");
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](Dictionary dict) {
EXPECT_EQ(dict.size(), 3);
EXPECT_TRUE(dict.contains("i32"));
EXPECT_TRUE(dict.contains("f32"));
EXPECT_TRUE(dict.contains("str"));
ErrorOr<int32_t> i32 = dict.get<int32_t>("i32");
ErrorOr<float> f32 = dict.get<float>("f32");
ErrorOr<std::string_view> str = dict.get<std::string_view>("str");
EXPECT_TRUE(i32.has_value());
EXPECT_TRUE(f32.has_value());
EXPECT_TRUE(str.has_value());
if (i32.has_value()) EXPECT_EQ(*i32, 42);
if (f32.has_value()) EXPECT_EQ(*f32, 42.0f);
if (str.has_value()) EXPECT_EQ(*str, "foo");
EXPECT_FALSE(dict.contains("i64"));
EXPECT_FALSE(dict.get<int64_t>("i32").has_value());
EXPECT_FALSE(dict.get<int64_t>("i64").has_value());
return Error::Success();
};
auto handler = Ffi::Bind().Attrs().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, DictionaryAttr) {
CallFrameBuilder::AttributesMap dict0;
dict0.try_emplace("i32", 42);
CallFrameBuilder::AttributesMap dict1;
dict1.try_emplace("f32", 42.0f);
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("dict0", dict0);
attrs.Insert("dict1", dict1);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](Dictionary dict0, Dictionary dict1) {
EXPECT_EQ(dict0.size(), 1);
EXPECT_EQ(dict1.size(), 1);
EXPECT_TRUE(dict0.contains("i32"));
EXPECT_TRUE(dict1.contains("f32"));
ErrorOr<int32_t> i32 = dict0.get<int32_t>("i32");
ErrorOr<float> f32 = dict1.get<float>("f32");
EXPECT_TRUE(i32.has_value());
EXPECT_TRUE(f32.has_value());
if (i32.has_value()) EXPECT_EQ(*i32, 42);
if (f32.has_value()) EXPECT_EQ(*f32, 42.0f);
return Error::Success();
};
auto handler =
Ffi::Bind().Attr<Dictionary>("dict0").Attr<Dictionary>("dict1").To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, StructAttr) {
CallFrameBuilder::AttributesMap dict;
dict.try_emplace("i32", 42);
dict.try_emplace("f32", 42.0f);
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("str", "foo");
attrs.Insert("i32_and_f32", dict);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](std::string_view str, PairOfI32AndF32 i32_and_f32) {
EXPECT_EQ(str, "foo");
EXPECT_EQ(i32_and_f32.i32, 42);
EXPECT_EQ(i32_and_f32.f32, 42.0f);
return Error::Success();
};
auto handler = Ffi::Bind()
.Attr<std::string_view>("str")
.Attr<PairOfI32AndF32>("i32_and_f32")
.To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AttrsAsStruct) {
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32", 42);
attrs.Insert("f32", 42.0f);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](PairOfI32AndF32 i32_and_f32) {
EXPECT_EQ(i32_and_f32.i32, 42);
EXPECT_EQ(i32_and_f32.f32, 42.0f);
return Error::Success();
};
auto handler = Ffi::Bind().Attrs<PairOfI32AndF32>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, PointerAttr) {
std::string foo = "foo";
auto ptr = reinterpret_cast<uintptr_t>(&foo);
static_assert(sizeof(ptr) == sizeof(int64_t));
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("ptr", static_cast<int64_t>(ptr));
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](const std::string* str) {
EXPECT_EQ(*str, "foo");
return Error::Success();
};
auto handler = Ffi::Bind().Attr<Pointer<std::string>>("ptr").To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, EnumAttr) {
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32_one", static_cast<std::underlying_type_t<Int32BasedEnum>>(
Int32BasedEnum::kOne));
attrs.Insert("i32_two", static_cast<std::underlying_type_t<Int32BasedEnum>>(
Int32BasedEnum::kTwo));
attrs.Insert("i64_one", static_cast<std::underlying_type_t<Int64BasedEnum>>(
Int64BasedEnum::kOne));
attrs.Insert("i64_two", static_cast<std::underlying_type_t<Int64BasedEnum>>(
Int64BasedEnum::kTwo));
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](Int32BasedEnum i32_one, Int32BasedEnum i32_two,
Int64BasedEnum i64_one, Int64BasedEnum i64_two) {
EXPECT_EQ(i32_one, Int32BasedEnum::kOne);
EXPECT_EQ(i32_two, Int32BasedEnum::kTwo);
EXPECT_EQ(i64_one, Int64BasedEnum::kOne);
EXPECT_EQ(i64_two, Int64BasedEnum::kTwo);
return Error::Success();
};
auto handler = Ffi::Bind()
.Attr<Int32BasedEnum>("i32_one")
.Attr<Int32BasedEnum>("i32_two")
.Attr<Int64BasedEnum>("i64_one")
.Attr<Int64BasedEnum>("i64_two")
.To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, WrongEnumAttrType) {
CallFrameBuilder::AttributesMap dict;
dict.try_emplace("i32", 42);
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32_enum1", dict);
attrs.Insert("i32_enum0", 42u);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [](Int32BasedEnum, Int32BasedEnum) { return Error::Success(); };
auto handler = Ffi::Bind()
.Attr<Int32BasedEnum>("i32_enum0")
.Attr<Int32BasedEnum>("i32_enum1")
.To(fn);
auto status = Call(*handler, call_frame);
EXPECT_TRUE(absl::StrContains(
status.message(),
"Failed to decode all FFI handler operands (bad operands at: 0, 1)"))
<< "status.message():\n"
<< status.message() << "\n";
EXPECT_TRUE(absl::StrContains(status.message(),
"Wrong scalar data type: expected S32 but got"))
<< "status.message():\n"
<< status.message() << "\n";
EXPECT_TRUE(absl::StrContains(
status.message(),
"Wrong attribute type: expected scalar but got dictionary"))
<< "status.message():\n"
<< status.message() << "\n";
}
struct MyData {
static TypeId id;
std::string str;
};
TypeId MyData::id = {};
XLA_FFI_REGISTER_TYPE(GetXlaFfiApi(), "my_data", &MyData::id);
TEST(FfiTest, UserData) {
MyData data{"foo"};
ExecutionContext execution_context;
TF_ASSERT_OK(execution_context.Insert(
TypeIdRegistry::TypeId(MyData::id.type_id), &data));
CallFrameBuilder builder(0, 0);
auto call_frame = builder.Build();
auto fn = [&](MyData* data) {
EXPECT_EQ(data->str, "foo");
return Error::Success();
};
auto handler = Ffi::Bind().Ctx<UserData<MyData>>().To(fn);
CallOptions options;
options.execution_context = &execution_context;
auto status = Call(*handler, call_frame, options);
TF_ASSERT_OK(status);
}
struct MyState {
static TypeId id;
explicit MyState(int32_t value) : value(value) {}
int32_t value;
};
TypeId MyState::id = {};
XLA_FFI_REGISTER_TYPE(GetXlaFfiApi(), "state", &MyState::id);
TEST(FfiTest, StatefulHandler) {
ExecutionState execution_state;
CallFrameBuilder builder(0, 0);
auto call_frame = builder.Build();
CallOptions options;
options.execution_state = &execution_state;
auto instantiate =
Ffi::BindInstantiate().To([]() -> ErrorOr<std::unique_ptr<MyState>> {
return std::make_unique<MyState>(42);
});
auto execute = Ffi::Bind().Ctx<State<MyState>>().To([](MyState* state) {
EXPECT_EQ(state->value, 42);
return Error::Success();
});
TF_ASSERT_OK(
Call(*instantiate, call_frame, options, ExecutionStage::kInstantiate));
TF_ASSERT_OK(Call(*execute, call_frame, options));
}
TEST(FfiTest, ScratchAllocator) {
static void* kAddr = reinterpret_cast<void*>(0xDEADBEEF);
struct TestDeviceMemoryAllocator final : public se::DeviceMemoryAllocator {
size_t count;
TestDeviceMemoryAllocator()
: se::DeviceMemoryAllocator(nullptr), count(0) {}
absl::StatusOr<se::OwningDeviceMemory> Allocate(int, uint64_t size, bool,
int64_t) final {
count++;
return se::OwningDeviceMemory(se::DeviceMemoryBase(kAddr, size), 0, this);
}
absl::Status Deallocate(int, se::DeviceMemoryBase mem) final {
count--;
EXPECT_EQ(mem.opaque(), kAddr);
return absl::OkStatus();
}
absl::StatusOr<se::Stream*> GetStream(int) final {
return absl::UnimplementedError("Not implemented");
}
};
auto fn = [&](ScratchAllocator scratch_allocator) {
auto mem = scratch_allocator.Allocate(1024);
EXPECT_EQ(*mem, kAddr);
return Error::Success();
};
TestDeviceMemoryAllocator allocator;
auto handler = Ffi::Bind().Ctx<ScratchAllocator>().To(fn);
  CallFrame call_frame = CallFrameBuilder(0, 0).Build();
CallOptions options;
options.backend_options = CallOptions::GpuOptions{nullptr, &allocator};
auto status = Call(*handler, call_frame, options);
TF_ASSERT_OK(status);
EXPECT_EQ(allocator.count, 0);
}
TEST(FfiTest, ScratchAllocatorUnimplemented) {
auto fn = [&](ScratchAllocator scratch_allocator) {
auto mem = scratch_allocator.Allocate(1024);
EXPECT_FALSE(mem.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().Ctx<ScratchAllocator>().To(fn);
  CallFrame call_frame = CallFrameBuilder(0, 0).Build();
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, ThreadPool) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "ffi-test", 2);
Eigen::ThreadPoolDevice device(pool.AsEigenThreadPool(), pool.NumThreads());
auto fn = [&](ThreadPool thread_pool) {
absl::BlockingCounter prepare(1);
absl::BlockingCounter execute(1);
thread_pool.Schedule([&] {
prepare.Wait();
execute.DecrementCount();
});
prepare.DecrementCount();
execute.Wait();
return Error::Success();
};
auto handler = Ffi::Bind().Ctx<ThreadPool>().To(fn);
  CallFrame call_frame = CallFrameBuilder(0, 0).Build();
CallOptions options;
options.backend_options = CallOptions::CpuOptions{&device};
auto status = Call(*handler, call_frame, options);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AsyncHandler) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "ffi-test", 2);
Eigen::ThreadPoolDevice device(pool.AsEigenThreadPool(), pool.NumThreads());
int32_t value = 0;
auto fn = [&](ThreadPool thread_pool) -> Future {
Promise promise;
Future future(promise);
thread_pool.Schedule([&, promise = std::move(promise)]() mutable {
value = 42;
promise.SetAvailable();
});
return future;
};
auto handler = Ffi::Bind().Ctx<ThreadPool>().To(fn);
  CallFrame call_frame = CallFrameBuilder(0, 0).Build();
CallOptions options;
options.backend_options = CallOptions::CpuOptions{&device};
{
absl::Status status = Call(*handler, call_frame, options);
TF_ASSERT_OK(status);
EXPECT_EQ(value, 42);
}
value = 0;
{
tsl::AsyncValueRef<tsl::Chain> async_value =
CallAsync(*handler, call_frame, options);
tsl::BlockUntilReady(async_value);
ASSERT_TRUE(async_value.IsConcrete());
EXPECT_EQ(value, 42);
}
}
TEST(FfiTest, Metadata) {
auto api = GetXlaFfiApi();
auto handler = Ffi::BindTo([]() { return Error::Success(); });
auto maybe_metadata = GetMetadata(*handler);
EXPECT_TRUE(maybe_metadata.ok());
auto metadata = maybe_metadata.value();
EXPECT_EQ(metadata.api_version.major_version, api->api_version.major_version);
EXPECT_EQ(metadata.api_version.minor_version, api->api_version.minor_version);
EXPECT_EQ(metadata.traits, 0);
}
TEST(FfiTest, MetadataTraits) {
auto handler = Ffi::BindTo([]() { return Error::Success(); },
{Traits::kCmdBufferCompatible});
auto maybe_metadata = GetMetadata(*handler);
EXPECT_TRUE(maybe_metadata.ok());
auto metadata = maybe_metadata.value();
EXPECT_EQ(metadata.api_version.major_version, XLA_FFI_API_MAJOR);
EXPECT_EQ(metadata.api_version.minor_version, XLA_FFI_API_MINOR);
EXPECT_EQ(metadata.traits, XLA_FFI_HANDLER_TRAITS_COMMAND_BUFFER_COMPATIBLE);
}
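// Benchmarks below measure call frame decoding and handler dispatch overhead.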
static CallFrameBuilder WithBufferArgs(size_t num_args, size_t rank = 4) {
se::DeviceMemoryBase memory;
  std::vector<int64_t> dims(rank, 1);
CallFrameBuilder builder(num_args, 0);
for (size_t i = 0; i < num_args; ++i) {
builder.AddBufferArg(memory, PrimitiveType::F32, dims);
}
return builder;
}
void BM_AnyBufferArgX1(benchmark::State& state) {
auto call_frame = WithBufferArgs(1).Build();
auto handler = Ffi::Bind().Arg<AnyBuffer>().To([](auto buffer) {
benchmark::DoNotOptimize(buffer);
return Error::Success();
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_AnyBufferArgX1);
void BM_AnyBufferArgX4(benchmark::State& state) {
auto call_frame = WithBufferArgs(4).Build();
auto handler = Ffi::Bind()
.Arg<AnyBuffer>()
.Arg<AnyBuffer>()
.Arg<AnyBuffer>()
.Arg<AnyBuffer>()
.To([](auto b0, auto b1, auto b2, auto b3) {
benchmark::DoNotOptimize(b0);
benchmark::DoNotOptimize(b1);
benchmark::DoNotOptimize(b2);
benchmark::DoNotOptimize(b3);
return Error::Success();
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_AnyBufferArgX4);
void BM_AsyncAnyBufferArgX1(benchmark::State& state) {
auto call_frame = WithBufferArgs(1).Build();
auto handler = Ffi::Bind().Arg<AnyBuffer>().To([](auto buffer) {
benchmark::DoNotOptimize(buffer);
Promise promise;
promise.SetAvailable();
return Future(promise);
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_AsyncAnyBufferArgX1);
void BM_BufferArgX1(benchmark::State& state) {
auto call_frame = WithBufferArgs(1).Build();
auto handler = Ffi::Bind().Arg<BufferR4<F32>>().To([](auto buffer) {
benchmark::DoNotOptimize(buffer);
return Error::Success();
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_BufferArgX1);
void BM_BufferArgX4(benchmark::State& state) {
auto call_frame = WithBufferArgs(4).Build();
auto handler = Ffi::Bind()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.To([](auto b0, auto b1, auto b2, auto b3) {
benchmark::DoNotOptimize(b0);
benchmark::DoNotOptimize(b1);
benchmark::DoNotOptimize(b2);
benchmark::DoNotOptimize(b3);
return Error::Success();
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_BufferArgX4);
void BM_BufferArgX8(benchmark::State& state) {
auto call_frame = WithBufferArgs(8).Build();
auto handler = Ffi::Bind()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.To([](auto b0, auto b1, auto b2, auto b3, auto b4,
auto b5, auto b6, auto b7) {
benchmark::DoNotOptimize(b0);
benchmark::DoNotOptimize(b1);
benchmark::DoNotOptimize(b2);
benchmark::DoNotOptimize(b3);
benchmark::DoNotOptimize(b4);
benchmark::DoNotOptimize(b5);
benchmark::DoNotOptimize(b6);
benchmark::DoNotOptimize(b7);
return Error::Success();
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_BufferArgX8);
void BM_TupleOfI32Attrs(benchmark::State& state) {
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32_0", 1);
attrs.Insert("i32_1", 2);
attrs.Insert("i32_2", 3);
attrs.Insert("i32_3", 4);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Attrs<TupleOfI32>().To([](auto tuple) {
benchmark::DoNotOptimize(tuple);
return Error::Success();
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_TupleOfI32Attrs);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/api/ffi.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/api/ffi_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
73a594f2-3fb4-4581-a0fc-baaa11a9b95c | cpp | tensorflow/tensorflow | hlo_casting_utils | third_party/xla/xla/hlo/ir/hlo_casting_utils.h | third_party/xla/xla/service/hlo_casting_utils_test.cc | #ifndef XLA_HLO_IR_HLO_CASTING_UTILS_H_
#define XLA_HLO_IR_HLO_CASTING_UTILS_H_
#include <type_traits>
#include "xla/hlo/ir/hlo_instruction.h"
#include "tsl/platform/logging.h"
namespace xla {
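// LLVM-style casting helpers for HloInstruction subclasses. Cast CHECK-fails
// on a type mismatch while DynCast returns nullptr; the *OrNull variants also
// accept a null instruction.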
template <class T>
using EnableIfDerivedFromHlo =
typename std::enable_if<std::is_base_of<HloInstruction, T>::value>::type;
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
const T* Cast(const HloInstruction* instruction) {
CHECK(instruction != nullptr);
CHECK(T::ClassOf(instruction))
<< "Invalid HloInstruction casting. Destination type: "
<< typeid(T).name() << ". Instruction: " << instruction->name();
const T* casted = static_cast<const T*>(instruction);
#ifndef NDEBUG
const T* dynamic_casted = dynamic_cast<const T*>(instruction);
CHECK(dynamic_casted != nullptr)
<< "Invalid HloInstruction casting. Destination type: "
<< typeid(T).name() << ". Instruction: " << instruction->name();
#endif
return casted;
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
T* Cast(HloInstruction* instruction) {
return const_cast<T*>(
Cast<T>(const_cast<const HloInstruction*>(instruction)));
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
const T* CastOrNull(const HloInstruction* instruction) {
return instruction != nullptr ? Cast<T>(instruction) : nullptr;
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
T* CastOrNull(HloInstruction* instruction) {
return const_cast<T*>(
CastOrNull<T>(const_cast<const HloInstruction*>(instruction)));
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
const T* DynCast(const HloInstruction* instruction) {
CHECK(instruction != nullptr);
const T* casted =
T::ClassOf(instruction) ? static_cast<const T*>(instruction) : nullptr;
#ifndef NDEBUG
CHECK_EQ(casted, dynamic_cast<const T*>(instruction));
#endif
return casted;
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
T* DynCast(HloInstruction* instruction) {
return const_cast<T*>(
DynCast<T>(const_cast<const HloInstruction*>(instruction)));
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
const T* DynCastOrNull(const HloInstruction* instruction) {
return instruction != nullptr ? DynCast<T>(instruction) : nullptr;
}
template <class T, EnableIfDerivedFromHlo<T>* = nullptr>
T* DynCastOrNull(HloInstruction* instruction) {
return const_cast<T*>(
DynCastOrNull<T>(const_cast<const HloInstruction*>(instruction)));
}
}
#endif | #include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class DummyInstruction : public HloInstruction {
public:
DummyInstruction()
: HloInstruction(HloOpcode::kConstant, ShapeUtil::MakeShape(F32, {})) {}
static bool ClassOf(const HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kConstant;
}
};
class AnotherDummyInstruction : public HloInstruction {
public:
AnotherDummyInstruction()
: HloInstruction(HloOpcode::kParameter, ShapeUtil::MakeShape(F32, {})) {}
static bool ClassOf(const HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kParameter;
}
};
TEST(HloCastingUtilsTest, CastSucceeds) {
DummyInstruction instruction;
DummyInstruction* casted =
Cast<DummyInstruction>(static_cast<HloInstruction*>(&instruction));
ASSERT_EQ(casted, &instruction);
}
TEST(HloCastingUtilsTest, CastDiesForWrongType) {
AnotherDummyInstruction instruction;
ASSERT_DEATH(
Cast<DummyInstruction>(static_cast<HloInstruction*>(&instruction)), "");
}
TEST(HloCastingUtilsTest, CastDiesForNullptr) {
HloInstruction* null = nullptr;
ASSERT_DEATH(Cast<DummyInstruction>(null), "");
}
TEST(HloCastingUtilsTest, CastOrNullSucceeds) {
DummyInstruction instruction;
DummyInstruction* casted =
Cast<DummyInstruction>(static_cast<HloInstruction*>(&instruction));
ASSERT_EQ(casted, &instruction);
}
TEST(HloCastingUtilsTest, CastOrNullDiesForWrongType) {
AnotherDummyInstruction instruction;
ASSERT_DEATH(
Cast<DummyInstruction>(static_cast<HloInstruction*>(&instruction)), "");
}
TEST(HloCastingUtilsTest, CastOrNullReturnsNullptrForNullptr) {
HloInstruction* null = nullptr;
DummyInstruction* casted = CastOrNull<DummyInstruction>(null);
ASSERT_EQ(casted, nullptr);
}
TEST(HloCastingUtilsTest, DynCastSucceeds) {
DummyInstruction instruction;
DummyInstruction* casted =
DynCast<DummyInstruction>(static_cast<HloInstruction*>(&instruction));
ASSERT_EQ(casted, &instruction);
}
TEST(HloCastingUtilsTest, DynCastReturnsNullptrForWrongType) {
AnotherDummyInstruction instruction;
DummyInstruction* casted =
DynCast<DummyInstruction>(static_cast<HloInstruction*>(&instruction));
ASSERT_EQ(casted, nullptr);
}
TEST(HloCastingUtilsTest, DynCastDiesForNullptr) {
HloInstruction* null = nullptr;
ASSERT_DEATH(DynCast<DummyInstruction>(null), "");
}
TEST(HloCastingUtilsTest, DynCastOrNullSucceeds) {
DummyInstruction instruction;
DummyInstruction* casted = DynCastOrNull<DummyInstruction>(
static_cast<HloInstruction*>(&instruction));
ASSERT_EQ(casted, &instruction);
}
TEST(HloCastingUtilsTest, DynCastOrNullReturnsNullptrForWrongType) {
AnotherDummyInstruction instruction;
DummyInstruction* casted = DynCastOrNull<DummyInstruction>(
static_cast<HloInstruction*>(&instruction));
ASSERT_EQ(casted, nullptr);
}
TEST(HloCastingUtilsTest, DynCastOrNullReturnsNullptrForNullptr) {
HloInstruction* null = nullptr;
DummyInstruction* casted = DynCastOrNull<DummyInstruction>(null);
ASSERT_EQ(casted, nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_casting_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_casting_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6506a74-564b-4421-9121-be913bba8ef3 | cpp | tensorflow/tensorflow | dfs_hlo_visitor_with_default | third_party/xla/xla/hlo/ir/dfs_hlo_visitor_with_default.h | third_party/xla/xla/service/dfs_hlo_visitor_with_default_test.cc | #ifndef XLA_HLO_IR_DFS_HLO_VISITOR_WITH_DEFAULT_H_
#define XLA_HLO_IR_DFS_HLO_VISITOR_WITH_DEFAULT_H_
#include <memory>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "tsl/platform/status.h"
namespace xla {
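// A DfsHloVisitor in which every Handle* method forwards to a single
// overridable DefaultAction, so subclasses only need to override the opcodes
// they care about.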
template <typename HloInstructionPtr>
class DfsHloVisitorWithDefaultBase
: public DfsHloVisitorBase<HloInstructionPtr> {
public:
DfsHloVisitorWithDefaultBase() = default;
~DfsHloVisitorWithDefaultBase() override = default;
virtual absl::Status DefaultAction(HloInstructionPtr hlo_instruction) = 0;
absl::Status HandleElementwiseUnary(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleElementwiseBinary(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleBatchNormTraining(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleBatchNormInference(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleBatchNormGrad(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleClamp(HloInstructionPtr clamp) override {
return DefaultAction(clamp);
}
absl::Status HandleConcatenate(HloInstructionPtr concatenate) override {
return DefaultAction(concatenate);
}
absl::Status HandleSelect(HloInstructionPtr select) override {
return DefaultAction(select);
}
absl::Status HandleDot(HloInstructionPtr dot) override {
return DefaultAction(dot);
}
absl::Status HandleConvolution(HloInstructionPtr convolution) override {
return DefaultAction(convolution);
}
absl::Status HandleFft(HloInstructionPtr fft) override {
return DefaultAction(fft);
}
absl::Status HandleTriangularSolve(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleCholesky(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleOptimizationBarrier(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleAllGather(HloInstructionPtr crs) override {
return DefaultAction(crs);
}
absl::Status HandleAllGatherStart(HloInstructionPtr crs) override {
return DefaultAction(crs);
}
absl::Status HandleAllGatherDone(HloInstructionPtr crs) override {
return DefaultAction(crs);
}
absl::Status HandleAllReduce(HloInstructionPtr crs) override {
return DefaultAction(crs);
}
absl::Status HandleReduceScatter(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleAllReduceStart(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleAllReduceDone(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleAllToAll(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleCollectiveBroadcast(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleCollectivePermute(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleCollectivePermuteStart(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleCollectivePermuteDone(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleReplicaId(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandlePartitionId(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleRng(HloInstructionPtr random) override {
return DefaultAction(random);
}
absl::Status HandleRngBitGenerator(HloInstructionPtr random) override {
return DefaultAction(random);
}
absl::Status HandleRngGetAndUpdateState(HloInstructionPtr random) override {
return DefaultAction(random);
}
absl::Status HandleInfeed(HloInstructionPtr infeed) override {
return DefaultAction(infeed);
}
absl::Status HandleOutfeed(HloInstructionPtr outfeed) override {
return DefaultAction(outfeed);
}
absl::Status HandleReverse(HloInstructionPtr reverse) override {
return DefaultAction(reverse);
}
absl::Status HandleSort(HloInstructionPtr sort) override {
return DefaultAction(sort);
}
absl::Status HandleConstant(HloInstructionPtr constant) override {
return DefaultAction(constant);
}
absl::Status HandleIota(HloInstructionPtr iota) override {
return DefaultAction(iota);
}
absl::Status HandleGetTupleElement(
HloInstructionPtr get_tuple_element) override {
return DefaultAction(get_tuple_element);
}
absl::Status HandleParameter(HloInstructionPtr parameter) override {
return DefaultAction(parameter);
}
absl::Status HandleFusion(HloInstructionPtr fusion) override {
return DefaultAction(fusion);
}
absl::Status HandleCall(HloInstructionPtr call) override {
return DefaultAction(call);
}
absl::Status HandleCustomCall(HloInstructionPtr custom_call) override {
return DefaultAction(custom_call);
}
absl::Status HandleSlice(HloInstructionPtr slice) override {
return DefaultAction(slice);
}
absl::Status HandleDynamicSlice(HloInstructionPtr dynamic_slice) override {
return DefaultAction(dynamic_slice);
}
absl::Status HandleDynamicUpdateSlice(
HloInstructionPtr dynamic_update_slice) override {
return DefaultAction(dynamic_update_slice);
}
absl::Status HandleTuple(HloInstructionPtr tuple) override {
return DefaultAction(tuple);
}
absl::Status HandleMap(HloInstructionPtr map) override {
return DefaultAction(map);
}
absl::Status HandleReduce(HloInstructionPtr reduce) override {
return DefaultAction(reduce);
}
absl::Status HandleReduceWindow(HloInstructionPtr reduce_window) override {
return DefaultAction(reduce_window);
}
absl::Status HandleSelectAndScatter(
HloInstructionPtr select_and_scatter) override {
return DefaultAction(select_and_scatter);
}
absl::Status HandleBitcast(HloInstructionPtr bitcast) override {
return DefaultAction(bitcast);
}
absl::Status HandleBroadcast(HloInstructionPtr broadcast) override {
return DefaultAction(broadcast);
}
absl::Status HandlePad(HloInstructionPtr pad) override {
return DefaultAction(pad);
}
absl::Status HandleDynamicReshape(
HloInstructionPtr dynamic_reshape) override {
return DefaultAction(dynamic_reshape);
}
absl::Status HandleReshape(HloInstructionPtr reshape) override {
return DefaultAction(reshape);
}
absl::Status HandleTranspose(HloInstructionPtr transpose) override {
return DefaultAction(transpose);
}
absl::Status HandleWhile(HloInstructionPtr xla_while) override {
return DefaultAction(xla_while);
}
absl::Status HandleConditional(HloInstructionPtr conditional) override {
return DefaultAction(conditional);
}
absl::Status HandleAsyncStart(HloInstructionPtr async_start) override {
return DefaultAction(async_start);
}
absl::Status HandleAsyncUpdate(HloInstructionPtr async_update) override {
return DefaultAction(async_update);
}
absl::Status HandleAsyncDone(HloInstructionPtr async_done) override {
return DefaultAction(async_done);
}
absl::Status HandleCopyStart(HloInstructionPtr copy_start) override {
return DefaultAction(copy_start);
}
absl::Status HandleCopyDone(HloInstructionPtr copy_done) override {
return DefaultAction(copy_done);
}
absl::Status HandleRecv(HloInstructionPtr recv) override {
return DefaultAction(recv);
}
absl::Status HandleRecvDone(HloInstructionPtr recv_done) override {
return DefaultAction(recv_done);
}
absl::Status HandleSend(HloInstructionPtr send) override {
return DefaultAction(send);
}
absl::Status HandleTopK(HloInstructionPtr topk) override {
return DefaultAction(topk);
}
absl::Status HandleSendDone(HloInstructionPtr send_done) override {
return DefaultAction(send_done);
}
absl::Status HandleGather(HloInstructionPtr gather) override {
return DefaultAction(gather);
}
absl::Status HandleScatter(HloInstructionPtr scatter) override {
return DefaultAction(scatter);
}
absl::Status HandleAfterAll(HloInstructionPtr token) override {
return DefaultAction(token);
}
absl::Status HandleGetDimensionSize(HloInstructionPtr get_size) override {
return DefaultAction(get_size);
}
absl::Status HandleSetDimensionSize(HloInstructionPtr get_size) override {
return DefaultAction(get_size);
}
absl::Status HandleAddDependency(HloInstructionPtr add_dependency) override {
return DefaultAction(add_dependency);
}
  absl::Status FinishVisit(HloInstructionPtr) override {
return absl::OkStatus();
}
private:
DfsHloVisitorWithDefaultBase(const DfsHloVisitorWithDefaultBase&) = delete;
DfsHloVisitorWithDefaultBase& operator=(const DfsHloVisitorWithDefaultBase&) =
delete;
};
using DfsHloVisitorWithDefault = DfsHloVisitorWithDefaultBase<HloInstruction*>;
using ConstDfsHloVisitorWithDefault =
DfsHloVisitorWithDefaultBase<const HloInstruction*>;
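// A visitor with default actions that also records whether it modified the
// module: the Replace* helpers below set changed_ on success, and RunOnModule
// reports it after accepting every non-fusion computation.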
class DfsHloRewriteVisitor : public DfsHloVisitorWithDefault {
public:
absl::StatusOr<bool> RunOnModule(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads = {}) {
absl::Status status;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
status = computation->Accept(this);
if (ABSL_PREDICT_FALSE(!status.ok())) return status;
}
return changed();
}
  absl::Status DefaultAction(HloInstruction*) override {
return absl::OkStatus();
}
bool changed() const { return changed_; }
protected:
absl::Status ReplaceWithNewInstruction(
HloInstruction* old_instruction,
std::unique_ptr<HloInstruction> new_instruction) {
VLOG(3) << "Replacing instruction:" << "\n old: "
<< old_instruction->ToString()
<< "\n new: " << new_instruction->ToString();
absl::Status status = old_instruction->parent()->ReplaceWithNewInstruction(
old_instruction, std::move(new_instruction));
if (ABSL_PREDICT_TRUE(status.ok())) {
changed_ = true;
}
return status;
}
absl::StatusOr<bool> ReplaceInstruction(HloInstruction* old_instruction,
HloInstruction* new_instruction,
bool preserve_sharding) {
VLOG(3) << "Replacing instruction:" << "\n old: "
<< old_instruction->ToString()
<< "\n new: " << new_instruction->ToString();
absl::StatusOr<bool> changed_or =
old_instruction->parent()->ReplaceInstruction(
old_instruction, new_instruction, preserve_sharding);
if (ABSL_PREDICT_TRUE(changed_or.ok())) {
changed_ |= changed_or.value();
}
return changed_or;
}
absl::Status ReplaceInstruction(HloInstruction* old_instruction,
HloInstruction* new_instruction) {
absl::StatusOr<bool> changed_or =
ReplaceInstruction(old_instruction, new_instruction,
false);
if (ABSL_PREDICT_TRUE(changed_or.ok())) {
DCHECK(changed_or.value());
}
return changed_or.status();
}
void MarkAsChanged() { changed_ = true; }
private:
bool changed_ = false;
};
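// A visitor that forwards every instruction to a caller-supplied callback;
// typically used to run a lambda over instructions via the Accept() methods.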
template <typename HloInstructionPtr>
class FunctionVisitorBase
: public DfsHloVisitorWithDefaultBase<HloInstructionPtr> {
public:
explicit FunctionVisitorBase(
std::function<absl::Status(HloInstructionPtr)> visitor_func)
: visitor_func_(std::move(visitor_func)) {}
absl::Status DefaultAction(HloInstructionPtr hlo_instruction) override {
return visitor_func_(hlo_instruction);
}
private:
FunctionVisitorBase(const FunctionVisitorBase&) = delete;
FunctionVisitorBase& operator=(const FunctionVisitorBase&) = delete;
std::function<absl::Status(HloInstructionPtr)> visitor_func_;
};
using FunctionVisitor = FunctionVisitorBase<HloInstruction*>;
using ConstFunctionVisitor = FunctionVisitorBase<const HloInstruction*>;
}
#endif | #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_runner.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class DfsHloVisitorWithDefaultTest : public HloTestBase {};
TEST_F(DfsHloVisitorWithDefaultTest, DefaultElementwiseTest) {
class ElementwiseTestVisitor : public DfsHloVisitorWithDefault {
public:
absl::Status DefaultAction(HloInstruction* hlo) override {
TF_RET_CHECK(!(hlo->IsElementwise() && hlo->operand_count() == 2))
<< hlo->ToString();
TF_RET_CHECK(!(hlo->IsElementwise() && hlo->operand_count() == 1))
<< hlo->ToString();
return absl::OkStatus();
}
absl::Status HandleElementwiseBinary(HloInstruction* hlo) override {
TF_RET_CHECK(hlo->IsElementwise() && hlo->operand_count() == 2)
<< hlo->ToString();
return absl::OkStatus();
}
absl::Status HandleElementwiseUnary(HloInstruction* hlo) override {
TF_RET_CHECK(hlo->IsElementwise() && hlo->operand_count() == 1)
<< hlo->ToString();
return absl::OkStatus();
}
};
const std::string& hlo_string = R"(
HloModule TestModule
ENTRY TestComputation {
arg = f32[] parameter(0)
tuple = (f32[]) tuple(arg)
gte = f32[] get-tuple-element(tuple), index=0
abs = f32[] abs(arg)
add = f32[] add(arg, gte)
broadcast = f32[42] broadcast(add), dimensions={}
slice = f32[1] slice(broadcast), slice={[1:2]}
copy = f32[] copy(arg)
eq = pred[] compare(arg, gte), direction=EQ
neg = f32[] negate(arg)
ROOT convert = f64[] convert(f32[] arg)
})";
std::unique_ptr<HloModule> module =
ParseAndReturnVerifiedModule(hlo_string).value();
ElementwiseTestVisitor visitor;
TF_EXPECT_OK(module->entry_computation()->Accept(&visitor));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/dfs_hlo_visitor_with_default.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dfs_hlo_visitor_with_default_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef2e10a4-b4e8-4a54-96cd-ad5096524559 | cpp | tensorflow/tensorflow | ptrvec | third_party/xla/xla/hlo/ir/ptrvec.h | third_party/xla/xla/hlo/ir/ptrvec_test.cc | #ifndef XLA_HLO_IR_PTRVEC_H_
#define XLA_HLO_IR_PTRVEC_H_
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <type_traits>
#include <vector>
#include "absl/log/check.h"
#include "tsl/platform/logging.h"
namespace xla {
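// PtrVec is a vector of pointers optimized for holding zero or one element:
// a single pointer is stored inline in one machine word when its low bit is
// clear, and heap storage is used otherwise or once more elements are added.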
template <typename T>
class PtrVec {
public:
static_assert(std::is_pointer<T>::value);
PtrVec();
~PtrVec();
PtrVec(const PtrVec& x);
PtrVec& operator=(const PtrVec& x);
PtrVec(PtrVec&& x);
PtrVec& operator=(PtrVec&& x);
using difference_type = std::ptrdiff_t;
using value_type = T;
using pointer = T*;
using reference = T&;
using const_reference = T const&;
using const_iterator = T const*;
const_iterator begin() const;
const_iterator end() const;
size_t size() const;
bool empty() const;
T* data();
T const* data() const;
T& operator[](size_t i);
T operator[](size_t i) const;
T at(size_t i) const;
T front() const;
T back() const;
void clear();
void pop_back();
void push_back(T x);
void erase(const_iterator iter);
operator std::vector<T>() const;
private:
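  // Representation: the low two bits of rep_ act as a tag. rep_ == kEmptyTag
  // means the vector is empty; a kBigTag tag means rep_ (with the tag masked
  // off) points to a heap-allocated Big; any other value is a single element
  // stored inline, which requires the pointer's low bit to be zero.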
static constexpr uintptr_t kEmptyTag = 0x1;
static constexpr uintptr_t kBigTag = 0x3;
static constexpr uintptr_t kTagMask = 0x3;
struct Big {
size_t size;
size_t capacity;
T data[];
};
inline static bool can_inline(T ptr) {
if constexpr (alignof(decltype(*ptr)) >= 2) {
DCHECK_EQ(reinterpret_cast<uintptr_t>(ptr) & 0x1, 0);
return true;
}
return ((reinterpret_cast<uintptr_t>(ptr) & 0x1) == 0);
}
inline bool is_big() const { return (rep_ & kTagMask) == kBigTag; }
inline Big* big() const {
DCHECK(is_big());
return reinterpret_cast<Big*>(rep_ & ~kTagMask);
}
inline static size_t big_size(size_t n) {
static constexpr size_t kMaxFit =
(std::numeric_limits<size_t>::max() - sizeof(Big)) / sizeof(T);
DCHECK_LE(n, kMaxFit);
const size_t result = sizeof(Big) + n * sizeof(T);
DCHECK_GE(result, sizeof(Big));
return result;
}
inline Big* MakeBig(size_t capacity) {
Big* big = static_cast<Big*>(malloc(big_size(capacity)));
big->size = 0;
big->capacity = capacity;
rep_ = reinterpret_cast<uintptr_t>(big) | kBigTag;
return big;
}
inline static void FreeBig(Big* big) { free(big); }
uintptr_t rep_;
};
template <class T>
inline PtrVec<T>::PtrVec() : rep_(kEmptyTag) {}
template <class T>
inline PtrVec<T>::~PtrVec() {
if (is_big()) FreeBig(big());
}
template <class T>
inline PtrVec<T>::PtrVec(const PtrVec& x) : rep_(kEmptyTag) {
*this = x;
}
template <class T>
inline PtrVec<T>& PtrVec<T>::operator=(const PtrVec& x) {
if (this == &x) {
return *this;
}
const size_t n = x.size();
Big* b;
if (!is_big()) {
if (n < 2) {
if (n == 0) {
rep_ = kEmptyTag;
return *this;
}
T single = x.front();
if (can_inline(single)) {
rep_ = reinterpret_cast<uintptr_t>(single);
DCHECK(!empty());
DCHECK(!is_big());
return *this;
}
}
b = MakeBig(x.size());
} else {
if (n == 0) {
clear();
return *this;
}
b = big();
if (b->capacity < n) {
FreeBig(b);
b = MakeBig(n);
}
}
memcpy(b->data, x.data(), n * sizeof(T));
b->size = n;
return *this;
}
template <class T>
inline PtrVec<T>::PtrVec(PtrVec&& x) : rep_(x.rep_) {
x.rep_ = kEmptyTag;
}
template <class T>
inline PtrVec<T>& PtrVec<T>::operator=(PtrVec&& x) {
if (this != &x) {
if (is_big()) {
FreeBig(big());
}
rep_ = x.rep_;
x.rep_ = kEmptyTag;
}
return *this;
}
template <class T>
inline size_t PtrVec<T>::size() const {
return is_big() ? big()->size : (rep_ != kEmptyTag ? 1 : 0);
}
template <class T>
inline bool PtrVec<T>::empty() const {
return rep_ == kEmptyTag;
}
template <class T>
inline T* PtrVec<T>::data() {
return is_big() ? big()->data : reinterpret_cast<T*>(&rep_);
}
template <class T>
inline T const* PtrVec<T>::data() const {
return is_big() ? big()->data : reinterpret_cast<T const*>(&rep_);
}
template <class T>
inline T& PtrVec<T>::operator[](size_t i) {
DCHECK_LT(i, size());
return *(data() + i);
}
template <class T>
inline T PtrVec<T>::operator[](size_t i) const {
DCHECK_LT(i, size());
return *(data() + i);
}
template <class T>
inline T PtrVec<T>::at(size_t i) const {
DCHECK_LT(i, size());
return *(data() + i);
}
template <class T>
inline T PtrVec<T>::front() const {
return (*this)[0];
}
template <class T>
inline T PtrVec<T>::back() const {
return (*this)[size() - 1];
}
template <class T>
inline typename PtrVec<T>::const_iterator PtrVec<T>::begin() const {
return data();
}
template <class T>
inline typename PtrVec<T>::const_iterator PtrVec<T>::end() const {
return data() + size();
}
template <class T>
inline void PtrVec<T>::clear() {
if (is_big()) {
FreeBig(big());
}
rep_ = kEmptyTag;
}
template <class T>
inline void PtrVec<T>::pop_back() {
DCHECK(!empty());
if (is_big()) {
big()->size--;
if (big()->size == 0) {
clear();
}
} else {
rep_ = kEmptyTag;
}
}
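// push_back moves between representations as needed: empty -> inline (when
// the pointer can be tagged) or a Big of capacity 1, inline -> a Big of
// capacity 2, and a full Big reallocates with doubled capacity.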
template <class T>
inline void PtrVec<T>::push_back(T x) {
if (!is_big()) {
if (rep_ == kEmptyTag) {
if (can_inline(x)) {
rep_ = reinterpret_cast<uintptr_t>(x);
DCHECK(!empty());
DCHECK(!is_big());
} else {
Big* b = MakeBig(1);
b->size = 1;
b->data[0] = x;
}
} else {
T singleton = front();
Big* b = MakeBig(2);
b->size = 2;
b->data[0] = singleton;
b->data[1] = x;
}
} else {
Big* b = big();
const size_t n = b->size;
DCHECK_LE(n, b->capacity);
if (n == b->capacity) {
Big* old = b;
b = MakeBig(std::max<size_t>(2, 2 * old->capacity));
memcpy(b->data, old->data, n * sizeof(T));
FreeBig(old);
}
b->data[n] = x;
b->size = n + 1;
}
}
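// erase on the inline representation simply resets to empty (there is only
// one element); on the big representation the tail is shifted down with
// memmove and the heap storage is released once the last element is removed.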
template <class T>
inline void PtrVec<T>::erase(const_iterator iter) {
DCHECK_GE(iter, begin());
DCHECK_LT(iter, end());
if (!is_big()) {
rep_ = kEmptyTag;
} else {
Big* b = big();
const size_t index = iter - b->data;
memmove(b->data + index, b->data + index + 1,
(b->size - index - 1) * sizeof(T));
b->size--;
if (b->size == 0) {
clear();
}
}
}
template <class T>
inline PtrVec<T>::operator std::vector<T>() const {
if (empty()) return {};
return std::vector<T>(begin(), end());
}
template <typename T>
bool operator==(const PtrVec<T>& a, const PtrVec<T>& b) {
auto a_data = a.data();
auto b_data = b.data();
return std::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
}
template <typename T>
bool operator!=(const PtrVec<T>& a, const PtrVec<T>& b) {
return !(a == b);
}
}
#endif | #include "xla/hlo/ir/ptrvec.h"
#include <cstdint>
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class PtrVecTest : public testing::Test {
public:
int* NewInt(int v) {
ints_.push_back(std::make_unique<int>(v));
return ints_.back().get();
}
void Fill(PtrVec<int*>& dst, absl::Span<const int> src) {
for (int v : src) {
dst.push_back(NewInt(v));
}
}
std::vector<int> Pointees(const PtrVec<int*>& src) {
std::vector<int> result;
result.reserve(src.size());
for (int* ptr : src) {
result.push_back(*ptr);
}
return result;
}
private:
std::vector<std::unique_ptr<int>> ints_;
};
std::vector<std::vector<int>> TestCases() {
return std::vector<std::vector<int>>{
{},
{100},
{200, 300},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
};
}
TEST_F(PtrVecTest, Accessors) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
ASSERT_EQ(v.empty(), c.empty());
ASSERT_EQ(v.size(), c.size());
if (!c.empty()) {
ASSERT_EQ(*v.front(), c.front());
ASSERT_EQ(*v.back(), c.back());
}
}
}
TEST_F(PtrVecTest, Iteration) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
int i = 0;
for (auto ptr : v) {
ASSERT_EQ(*ptr, c[i]);
i++;
}
}
}
TEST_F(PtrVecTest, Indexing) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
for (int i = 0; i < c.size(); i++) {
ASSERT_EQ(*v[i], c[i]);
ASSERT_EQ(*v.at(i), c[i]);
}
}
}
TEST_F(PtrVecTest, Data) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
int** data = v.data();
for (int i = 0; i < c.size(); i++) {
ASSERT_EQ(*data[i], c[i]);
}
}
}
TEST_F(PtrVecTest, ConversionToVector) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
std::vector<int*> vec = v;
ASSERT_EQ(vec.size(), c.size());
for (int i = 0; i < c.size(); i++) {
ASSERT_EQ(*vec[i], c[i]);
}
}
}
TEST_F(PtrVecTest, Clear) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
v.clear();
EXPECT_EQ(Pointees(v), std::vector<int>{});
}
}
TEST_F(PtrVecTest, PopBack) {
for (const auto& c : TestCases()) {
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
auto model = c;
while (!model.empty()) {
model.pop_back();
v.pop_back();
EXPECT_EQ(Pointees(v), model);
}
}
}
TEST_F(PtrVecTest, Erase) {
for (const auto& c : TestCases()) {
if (c.empty()) {
continue;
}
SCOPED_TRACE(c.size());
PtrVec<int*> v;
Fill(v, c);
auto model = c;
int offset = c.size() / 2;
model.erase(model.begin() + offset);
v.erase(v.begin() + offset);
EXPECT_EQ(Pointees(v), model);
}
}
TEST_F(PtrVecTest, Assign) {
const auto cases = TestCases();
for (const auto& x : cases) {
for (const auto& y : cases) {
SCOPED_TRACE(absl::StrFormat("from %d to %d", x.size(), y.size()));
{
PtrVec<int*> b;
Fill(b, y);
PtrVec<int*> a = b;
ASSERT_EQ(Pointees(a), y);
}
{
PtrVec<int*> b;
Fill(b, y);
PtrVec<int*> a = std::move(b);
ASSERT_EQ(Pointees(a), y);
ASSERT_EQ(Pointees(b), std::vector<int>{});
}
{
PtrVec<int*> a;
Fill(a, x);
ASSERT_EQ(Pointees(a), x);
PtrVec<int*> b;
Fill(b, y);
a = b;
ASSERT_EQ(Pointees(a), y);
}
{
PtrVec<int*> a;
Fill(a, x);
PtrVec<int*> b;
Fill(b, y);
a = std::move(b);
ASSERT_EQ(Pointees(a), y);
ASSERT_EQ(Pointees(b), std::vector<int>{});
}
}
}
}
TEST_F(PtrVecTest, ReducedAlignment) {
const char* str = "hello world";
for (int i = 0; i < 11; i++) {
PtrVec<const char*> vec;
vec.push_back(&str[i]);
EXPECT_EQ(vec.size(), 1);
EXPECT_EQ(vec[0], &str[i]);
PtrVec<const char*> copy;
copy = vec;
EXPECT_EQ(copy.size(), 1);
EXPECT_EQ(copy[0], &str[i]);
}
}
struct Elem {
int64_t number;
};
void BM_PtrVecIter(::testing::benchmark::State& state) {
const int n = state.range(0);
std::vector<Elem> storage(n);
PtrVec<Elem*> vec;
for (int i = 0; i < n; i++) {
storage[i].number = i;
vec.push_back(&storage[i]);
}
uintptr_t sum = 0;
for (auto s : state) {
for (int i = 0; i < vec.size(); i++) {
sum += reinterpret_cast<uintptr_t>(vec[i]);
}
}
VLOG(1) << sum;
}
BENCHMARK(BM_PtrVecIter)->Arg(0)->Arg(1)->Arg(2)->Arg(4)->Arg(8)->Arg(1024);
void BM_StdVecIter(::testing::benchmark::State& state) {
const int n = state.range(0);
std::vector<Elem> storage(n);
std::vector<Elem*> vec;
for (int i = 0; i < n; i++) {
storage[i].number = i;
vec.push_back(&storage[i]);
}
uintptr_t sum = 0;
for (auto s : state) {
for (int i = 0; i < vec.size(); i++) {
sum += reinterpret_cast<uintptr_t>(vec[i]);
}
}
VLOG(1) << sum;
}
BENCHMARK(BM_StdVecIter)->Arg(0)->Arg(1)->Arg(2)->Arg(4)->Arg(8)->Arg(1024);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/ptrvec.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/ptrvec_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e69fde1a-b1ac-484c-b566-41a0b2e4cb3a | cpp | tensorflow/tensorflow | pattern_matcher | third_party/xla/xla/service/pattern_matcher.h | third_party/xla/xla/service/pattern_matcher_test.cc | #ifndef XLA_SERVICE_PATTERN_MATCHER_H_
#define XLA_SERVICE_PATTERN_MATCHER_H_
#include <cstddef>
#include <cstdint>
#include <ios>
#include <memory>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "absl/utility/utility.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/ptrvec.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
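// Options threaded through every matcher. `capture` controls whether matched
// sub-patterns are bound to their capture pointers, `single_user_only`
// additionally requires matched operands to have exactly one user, and
// `explain_os`, when non-null, receives an explanation of why a match failed.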
struct MatchOption {
bool capture;
bool single_user_only;
std::ostream* explain_os;
};
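// Entry point: returns true iff `value` matches `pattern`. When capturing is
// enabled, the pattern is first run in a non-capturing pass so that capture
// pointers are only written if the overall match succeeds.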
template <typename Value, typename Pattern>
bool Match(Value* value, const Pattern& pattern,
           MatchOption option = {/*capture=*/true,
                                 /*single_user_only=*/false,
                                 /*explain_os=*/nullptr}) {
if (option.capture) {
auto new_option = option;
new_option.capture = false;
if (!pattern.Match(value, new_option)) {
return false;
}
}
return pattern.Match(value, option);
}
template <typename Value, typename Pattern>
bool MatchSingleUserOnly(Value* value, const Pattern& pattern) {
  MatchOption option = {/*capture=*/true, /*single_user_only=*/true,
                        /*explain_os=*/nullptr};
return Match(value, pattern, option);
}
template <typename FilterPattern, typename Pattern>
bool MatchAndLogIfFailed(HloInstruction* instr, absl::string_view desc,
const Pattern& pattern, bool enable_logging,
const FilterPattern& filter_pattern) {
bool matched = Match(instr, pattern);
if (matched || !enable_logging || !Match(instr, filter_pattern)) {
return matched;
}
std::stringstream os;
CHECK(!Match(
instr, pattern,
      {/*capture=*/false, /*single_user_only=*/false, /*explain_os=*/&os}));
LOG(ERROR) << "Failed to match " << desc << ":\n" << os.str();
return false;
}
namespace match {
namespace detail {
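// EXPLAIN writes to option.explain_os when an explanation stream was
// supplied; matchers use it to record why a match failed. The previous
// definition of the macro, if any, is saved with push_macro.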
#pragma push_macro("EXPLAIN")
#define EXPLAIN \
if (option.explain_os) *option.explain_os
enum {
kIndentInc = 2,
};
inline void Indent(std::ostream* os, int64_t indent) {
*os << "\n";
for (int64_t i = 0; i < indent; ++i) {
*os << " ";
}
}
template <typename T, typename Dummy = void>
struct IsTrivialMatcher {
static constexpr bool value = false;
};
template <typename T>
struct IsTrivialMatcher<T,
typename std::enable_if<T::kIsTrivialMatcher>::type> {
static constexpr bool value = true;
};
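// A pattern that matches only if every sub-pattern matches. This is the
// building block behind the chained With*() modifiers: each modifier wraps
// the existing matcher and the new constraint in an AllOfPattern.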
template <typename Item, typename... Patterns>
class AllOfPattern {
public:
explicit AllOfPattern(const Patterns&... patterns) : patterns_(patterns...) {}
bool Match(const Item* item, MatchOption option) const {
bool matched = MatchImpl(item, option, std::integral_constant<size_t, 0>());
DCHECK(matched || !option.capture);
return matched;
}
bool Match(Item* item, MatchOption option) const {
bool matched = MatchImpl(item, option, std::integral_constant<size_t, 0>());
DCHECK(matched || !option.capture);
return matched;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
DescribeToImpl(os, std::integral_constant<size_t, 0>(), indent);
}
const std::tuple<Patterns...>& patterns() const { return patterns_; }
private:
template <typename ItemType, size_t index>
bool MatchImpl(ItemType* item, MatchOption option,
std::integral_constant<size_t, index>) const {
return std::get<index>(patterns_).Match(item, option) &&
MatchImpl(item, option, std::integral_constant<size_t, index + 1>());
}
template <typename ItemType>
bool MatchImpl(ItemType* item, MatchOption option,
std::integral_constant<size_t, sizeof...(Patterns)>) const {
return true;
}
template <size_t index>
void DescribeToImpl(std::ostream* os, std::integral_constant<size_t, index>,
int64_t indent) const {
constexpr bool first_is_trivial =
IsTrivialMatcher<typename std::remove_reference<decltype(std::get<0>(
patterns_))>::type>::value;
constexpr bool is_last = index == sizeof...(Patterns) - 1;
const auto& submatcher = std::get<index>(patterns_);
auto print_bulleted_item = [&] {
*os << " * ";
submatcher.DescribeTo(os, indent + 3);
if (!is_last) {
*os << " AND";
Indent(os, indent);
}
};
if (index == 0) {
if (first_is_trivial || is_last) {
submatcher.DescribeTo(os, indent + kIndentInc);
if (sizeof...(Patterns) > 2) {
*os << ":";
Indent(os, indent);
}
} else {
*os << "all of:";
Indent(os, indent);
print_bulleted_item();
}
} else if (first_is_trivial && index == 1 && sizeof...(Patterns) == 2) {
*os << " ";
submatcher.DescribeTo(os, indent);
} else {
print_bulleted_item();
}
DescribeToImpl(os, std::integral_constant<size_t, index + 1>(), indent);
}
void DescribeToImpl(std::ostream* os,
std::integral_constant<size_t, sizeof...(Patterns)>,
int64_t indent) const {}
std::tuple<Patterns...> patterns_;
};
}
template <typename Item, typename... Patterns>
auto AllOf(const Patterns&... patterns) {
return detail::AllOfPattern<typename std::remove_const<Item>::type,
Patterns...>(patterns...);
}
template <typename Item, typename... InnerPs, typename... OuterPs>
auto AllOf(const detail::AllOfPattern<Item, InnerPs...>& inner_p,
const OuterPs&... outer_ps) {
auto make_all_of = [](const InnerPs&... inner_ps,
const OuterPs&... outer_ps) {
return detail::AllOfPattern<typename std::remove_const<Item>::type,
InnerPs..., OuterPs...>(inner_ps...,
outer_ps...);
};
return absl::apply(make_all_of, std::tuple_cat(inner_p.patterns(),
std::make_tuple(outer_ps...)));
}
namespace detail {
template <typename LayoutType, typename Impl>
class LayoutPattern;
class LayoutPatternBaseImpl {
public:
bool Match(const ::xla::Layout* layout, MatchOption option) const {
if (layout == nullptr) {
EXPLAIN << "Layout is null";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "a layout";
}
static constexpr bool kIsTrivialMatcher = true;
};
class LayoutPatternEqualImpl {
public:
explicit constexpr LayoutPatternEqualImpl(const ::xla::Layout* layout)
: layout_(layout) {}
bool Match(const ::xla::Layout* layout, MatchOption option) const {
if (!LayoutUtil::Equal(*layout_, *layout)) {
EXPLAIN << "Layout " << LayoutUtil::HumanString(*layout)
<< " is not equal to expected "
<< LayoutUtil::HumanString(*layout_);
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "equal to " << LayoutUtil::HumanString(*layout_);
}
private:
const ::xla::Layout* layout_;
};
class LayoutPatternMinorToMajorImpl {
public:
explicit LayoutPatternMinorToMajorImpl(
absl::Span<const int64_t> minor_to_major)
: minor_to_major_(minor_to_major.begin(), minor_to_major.end()) {}
bool Match(const ::xla::Layout* layout, MatchOption option) const {
if (layout->minor_to_major() != minor_to_major_) {
EXPLAIN << "Layout does not have minor to major ["
<< absl::StrJoin(minor_to_major_, ",") << "]";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with minor to major [" << absl::StrJoin(minor_to_major_, ",")
<< "]";
}
private:
absl::InlinedVector<int64_t, 8> minor_to_major_;
};
template <typename LayoutType, typename Impl>
class LayoutPattern {
private:
template <typename NewImpl>
auto AppendImpl(NewImpl new_impl) const {
auto new_allof = AllOf<::xla::Layout>(impl_, std::move(new_impl));
return LayoutPattern<LayoutType, decltype(new_allof)>(std::move(new_allof),
matched_layout_);
}
public:
explicit constexpr LayoutPattern(const Impl& impl,
LayoutType** matched_layout)
: impl_(impl), matched_layout_(matched_layout) {}
bool Match(const ::xla::Layout* layout, MatchOption option) const {
if (impl_.Match(layout, option)) {
if (option.capture && matched_layout_) {
*matched_layout_ = layout;
}
return true;
}
return false;
}
bool Match(::xla::Layout* layout, MatchOption option) const {
if (impl_.Match(layout, option)) {
if (option.capture && matched_layout_) {
*matched_layout_ = layout;
}
return true;
}
return false;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
impl_.DescribeTo(os, indent);
}
constexpr auto EqualTo(const ::xla::Layout* layout) const {
return AppendImpl(LayoutPatternEqualImpl(layout));
}
constexpr auto WithMinorToMajor(
absl::Span<const int64_t> minor_to_major) const {
return AppendImpl(LayoutPatternMinorToMajorImpl(minor_to_major));
}
private:
Impl impl_;
LayoutType** matched_layout_;
};
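// A pattern that matches if at least one sub-pattern matches. Captures are
// only performed for the alternative that actually matched.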
template <typename Item, typename... Patterns>
class AnyOfPattern {
public:
explicit AnyOfPattern(const Patterns&... patterns) : patterns_(patterns...) {}
bool Match(const Item* item, MatchOption option) const {
return MatchImpl(item, option);
}
bool Match(Item* item, MatchOption option) const {
return MatchImpl(item, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "any of:";
Indent(os, indent);
DescribeToImpl(os, std::integral_constant<size_t, 0>(), indent);
}
private:
template <typename ItemType>
bool MatchImpl(ItemType* item, MatchOption option) const {
std::optional<std::stringstream> explanation;
MatchOption new_option = option;
if (option.explain_os) {
new_option.explain_os = &explanation.emplace();
}
bool rv = MatchRecursiveImpl(item, new_option,
std::integral_constant<size_t, 0>());
if (!rv && option.explain_os) {
EXPLAIN << "None of the following matchers succeeded:";
EXPLAIN << explanation->str();
}
return rv;
}
template <typename ItemType, size_t index>
bool MatchRecursiveImpl(ItemType* item, MatchOption option,
std::integral_constant<size_t, index>) const {
auto new_option = option;
new_option.capture = false;
std::optional<std::stringstream> explanation;
if (option.explain_os) {
new_option.explain_os = &explanation.emplace();
}
if (std::get<index>(patterns_).Match(item, new_option)) {
if (option.capture) {
bool matched = std::get<index>(patterns_).Match(item, option);
DCHECK(matched);
}
return true;
}
if (option.explain_os) {
EXPLAIN << "\nMatcher #" << index + 1;
EXPLAIN << "\n - ";
std::get<index>(patterns_).DescribeTo(option.explain_os, 3);
EXPLAIN << "\nfailed with";
EXPLAIN << "\n - ";
EXPLAIN << absl::StrReplaceAll(explanation->str(), {{"\n", "\n "}});
}
return MatchRecursiveImpl(item, option,
std::integral_constant<size_t, index + 1>());
}
template <typename ItemType>
bool MatchRecursiveImpl(
ItemType* item, MatchOption option,
std::integral_constant<size_t, sizeof...(Patterns)>) const {
return false;
}
template <size_t index>
void DescribeToImpl(std::ostream* os, std::integral_constant<size_t, index>,
int64_t indent) const {
*os << " - ";
std::get<index>(patterns_).DescribeTo(os, indent + 3);
if (index != sizeof...(Patterns) - 1) {
*os << " OR";
Indent(os, indent);
}
DescribeToImpl(os, std::integral_constant<size_t, index + 1>(), indent);
}
void DescribeToImpl(std::ostream* os,
std::integral_constant<size_t, sizeof...(Patterns)>,
int64_t indent) const {}
std::tuple<Patterns...> patterns_;
};
}
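// Creates a layout pattern; if `matched_layout` is provided, the matched
// Layout is stored there on success. The overload below captures a mutable
// Layout*.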
inline constexpr auto Layout(const ::xla::Layout** matched_layout = nullptr) {
return detail::LayoutPattern<const ::xla::Layout,
detail::LayoutPatternBaseImpl>(
detail::LayoutPatternBaseImpl(), matched_layout);
}
inline constexpr auto Layout(::xla::Layout** matched_layout) {
return detail::LayoutPattern<::xla::Layout, detail::LayoutPatternBaseImpl>(
detail::LayoutPatternBaseImpl(), matched_layout);
}
namespace detail {
template <typename ShapeType, typename Impl>
class ShapePattern;
class ShapePatternBaseImpl {
public:
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (shape == nullptr) {
EXPLAIN << "Shape is null";
}
return shape != nullptr;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "a shape";
}
static constexpr bool kIsTrivialMatcher = true;
};
class ShapePatternEqualImpl {
public:
explicit constexpr ShapePatternEqualImpl(const ::xla::Shape* shape)
: shape_(shape) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!ShapeUtil::Equal(*shape_, *shape)) {
EXPLAIN << "Shape not equal to "
<< ShapeUtil::HumanStringWithLayout(*shape_);
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "equal to " << ShapeUtil::HumanStringWithLayout(*shape_);
}
private:
const ::xla::Shape* shape_;
};
class ShapePatternCompatibleImpl {
public:
explicit constexpr ShapePatternCompatibleImpl(const ::xla::Shape* shape)
: shape_(shape) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!ShapeUtil::Compatible(*shape_, *shape)) {
EXPLAIN << "Shape not compatible with "
<< ShapeUtil::HumanString(*shape_);
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "compatible with " << ShapeUtil::HumanString(*shape_);
}
private:
const ::xla::Shape* shape_;
};
class ShapePatternElementTypeImpl {
public:
explicit constexpr ShapePatternElementTypeImpl(PrimitiveType element_type)
: element_type_(element_type) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (shape->element_type() != element_type_) {
EXPLAIN << "Shape does not have element type "
<< PrimitiveType_Name(element_type_);
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with element type " << PrimitiveType_Name(element_type_);
}
private:
PrimitiveType element_type_;
};
class ShapePatternDimsImpl {
public:
explicit ShapePatternDimsImpl(absl::Span<const int64_t> dims)
: dims_(dims.begin(), dims.end()) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (shape->dimensions() != dims_) {
EXPLAIN << "Shape does not have dimensions [" << absl::StrJoin(dims_, ",")
<< "]";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with dimensions [" << absl::StrJoin(dims_, ",") << "]";
}
private:
absl::InlinedVector<int64_t, 8> dims_;
};
class ShapePatternIsScalarImpl {
public:
explicit constexpr ShapePatternIsScalarImpl() = default;
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!ShapeUtil::IsScalar(*shape)) {
EXPLAIN << "Shape is not a scalar";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "that represents a scalar";
}
};
class ShapePatternIsArrayImpl {
public:
explicit constexpr ShapePatternIsArrayImpl() = default;
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!shape->IsArray()) {
EXPLAIN << "Shape is not an array";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "that represents an array";
}
};
class ShapePatternIsDenseArrayImpl {
public:
explicit constexpr ShapePatternIsDenseArrayImpl() = default;
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!LayoutUtil::IsDenseArray(*shape)) {
EXPLAIN << "Shape is not a dense array";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "that represents a dense array";
}
};
class ShapePatternIsTupleImpl {
public:
explicit constexpr ShapePatternIsTupleImpl() = default;
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!shape->IsTuple()) {
EXPLAIN << "Shape is not a tuple";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "that represents a tuple";
}
};
class ShapePatternEffectiveScalarImpl {
public:
explicit constexpr ShapePatternEffectiveScalarImpl() = default;
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!ShapeUtil::IsEffectiveScalar(*shape)) {
EXPLAIN << "Shape is not an effective scalar";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "that is an effective scalar";
}
};
class ShapePatternRankImpl {
public:
explicit constexpr ShapePatternRankImpl(int64_t rank) : rank_(rank) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (shape->rank() != rank_) {
if (rank_ == 0) {
EXPLAIN << "Shape is not a scalar";
} else {
EXPLAIN << "Shape does not have rank " << rank_;
}
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
if (rank_ == 0) {
*os << "that is a scalar";
} else {
*os << "that has " << rank_ << " dimension" << (rank_ != 1 ? "s" : "");
}
}
private:
int64_t rank_;
};
template <typename LayoutType, typename LayoutImpl>
class ShapePatternLayoutImpl {
public:
explicit constexpr ShapePatternLayoutImpl(
const LayoutPattern<LayoutType, LayoutImpl>& layout)
: layout_(layout) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
return LayoutUtil::HasLayout(*shape) &&
layout_.Match(&shape->layout(), option);
}
bool Match(::xla::Shape* shape, MatchOption option) const {
if (!LayoutUtil::HasLayout(*shape)) {
EXPLAIN << "Shape does not have a layout";
return false;
}
if (!layout_.Match(shape->mutable_layout(), option)) {
EXPLAIN << "\nin layout";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with";
Indent(os, indent + kIndentInc);
layout_.DescribeTo(os, indent + kIndentInc);
}
private:
LayoutPattern<LayoutType, LayoutImpl> layout_;
};
template <typename SubshapeType, typename SubshapeImpl>
class ShapePatternSubshapeImpl {
public:
explicit ShapePatternSubshapeImpl(
ShapeIndexView index,
const ShapePattern<SubshapeType, SubshapeImpl>& subshape)
: index_(index), subshape_(subshape) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
return MatchImpl(shape, option);
}
bool Match(::xla::Shape* shape, MatchOption option) const {
return MatchImpl(shape, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with subshape at index " << ShapeIndex(index_) << " which is";
Indent(os, indent + kIndentInc);
subshape_.DescribeTo(os, indent + kIndentInc);
}
private:
::xla::Shape* GetSubshape(::xla::Shape* shape) const {
return ShapeUtil::GetMutableSubshape(shape, index_);
}
const ::xla::Shape* GetSubshape(const ::xla::Shape* shape) const {
return &ShapeUtil::GetSubshape(*shape, index_);
}
template <typename ShapeType>
bool MatchImpl(ShapeType* shape, MatchOption option) const {
if (!ShapeUtil::IndexIsValid(*shape, index_)) {
EXPLAIN << "No subshape at " << ShapeIndex(index_);
return false;
}
if (!subshape_.Match(GetSubshape(shape), option)) {
EXPLAIN << "\nin subshape at " << ShapeIndex(index_);
return false;
}
return true;
}
ShapeIndexView index_;
ShapePattern<SubshapeType, SubshapeImpl> subshape_;
};
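// The user-facing shape matcher. Each With*() / Is*() modifier returns a new
// ShapePattern whose impl is the conjunction (AllOf) of the current impl and
// the added constraint; on success the shape may be captured into
// matched_shape_.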
template <typename ShapeType, typename Impl>
class ShapePattern {
private:
template <typename NewImpl>
auto AppendImpl(NewImpl new_impl) const {
auto new_all_of = AllOf<::xla::Shape>(impl_, std::move(new_impl));
return ShapePattern<ShapeType, decltype(new_all_of)>(std::move(new_all_of),
matched_shape_);
}
public:
explicit constexpr ShapePattern(const Impl& impl, ShapeType** matched_shape)
: impl_(impl), matched_shape_(matched_shape) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (impl_.Match(shape, option)) {
if (option.capture && matched_shape_) {
*matched_shape_ = shape;
}
return true;
}
if (shape) {
EXPLAIN << "\nin "
<< (shape->has_layout() ? ShapeUtil::HumanStringWithLayout(*shape)
: ShapeUtil::HumanString(*shape));
}
return false;
}
bool Match(::xla::Shape* shape, MatchOption option) const {
if (impl_.Match(shape, option)) {
if (option.capture && matched_shape_) {
*matched_shape_ = shape;
}
return true;
}
EXPLAIN << "\nin "
<< (shape->has_layout() ? ShapeUtil::HumanStringWithLayout(*shape)
: ShapeUtil::HumanString(*shape));
return false;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
return impl_.DescribeTo(os, indent);
}
constexpr auto EqualTo(const ::xla::Shape* shape) const {
return AppendImpl(ShapePatternEqualImpl(shape));
}
constexpr auto CompatibleTo(const ::xla::Shape* shape) const {
return AppendImpl(ShapePatternCompatibleImpl(shape));
}
constexpr auto WithElementType(PrimitiveType element_type) const {
return AppendImpl(ShapePatternElementTypeImpl(element_type));
}
constexpr auto WithDims(absl::Span<const int64_t> dims) const {
return AppendImpl(ShapePatternDimsImpl(dims));
}
constexpr auto IsScalar() const {
return AppendImpl(ShapePatternIsScalarImpl());
}
constexpr auto IsArray() const {
return AppendImpl(ShapePatternIsArrayImpl());
}
constexpr auto IsTuple() const {
return AppendImpl(ShapePatternIsTupleImpl());
}
constexpr auto IsEffectiveScalar() const {
return AppendImpl(ShapePatternEffectiveScalarImpl());
}
constexpr auto WithRank(int64_t rank) const {
return AppendImpl(ShapePatternRankImpl(rank));
}
template <typename LayoutType, typename LayoutImpl>
auto WithLayout(const LayoutPattern<LayoutType, LayoutImpl>& layout) const {
return AppendImpl(ShapePatternLayoutImpl<LayoutType, LayoutImpl>(layout));
}
constexpr auto WithLayout(absl::Span<const int64_t> minor_to_major) const {
return WithLayout(Layout().WithMinorToMajor(minor_to_major));
}
constexpr auto WithLayoutEqualTo(const ::xla::Layout* layout) const {
return WithLayout(Layout().EqualTo(layout));
}
constexpr auto IsDenseArray() const {
return AppendImpl(ShapePatternIsDenseArrayImpl());
}
template <typename SubshapeType, typename SubshapeImpl>
auto WithSubshape(
ShapeIndexView index,
const ShapePattern<SubshapeType, SubshapeImpl>& subshape) const {
return AppendImpl(
ShapePatternSubshapeImpl<SubshapeType, SubshapeImpl>(index, subshape));
}
ShapePattern<ShapeType,
AllOfPattern<::xla::Shape, Impl,
ShapePatternSubshapeImpl<
const ::xla::Shape,
AllOfPattern<::xla::Shape, ShapePatternBaseImpl,
ShapePatternEqualImpl>>>>
WithSubshapeEqualTo(ShapeIndexView index, const ::xla::Shape* shape) const {
return WithSubshape(index,
ShapePattern<const ::xla::Shape, ShapePatternBaseImpl>(
ShapePatternBaseImpl(), nullptr)
.EqualTo(shape));
}
ShapePattern<ShapeType,
AllOfPattern<::xla::Shape, Impl,
ShapePatternSubshapeImpl<
const ::xla::Shape,
AllOfPattern<::xla::Shape, ShapePatternBaseImpl,
ShapePatternCompatibleImpl>>>>
WithSubshapeCompatibleTo(ShapeIndexView index,
const ::xla::Shape* shape) const {
return WithSubshape(index,
ShapePattern<const ::xla::Shape, ShapePatternBaseImpl>(
ShapePatternBaseImpl(), nullptr)
.CompatibleTo(shape));
}
private:
Impl impl_;
ShapeType** matched_shape_;
};
}
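// Creates a shape pattern; if `matched_shape` is provided, the matched Shape
// is stored there on success.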
inline constexpr auto Shape(const ::xla::Shape** matched_shape = nullptr) {
return detail::ShapePattern<const ::xla::Shape, detail::ShapePatternBaseImpl>(
detail::ShapePatternBaseImpl(), matched_shape);
}
inline constexpr auto Shape(::xla::Shape** matched_shape) {
return detail::ShapePattern<::xla::Shape, detail::ShapePatternBaseImpl>(
detail::ShapePatternBaseImpl(), matched_shape);
}
namespace detail {
inline HloInstruction* HloOperand(HloInstruction* instr, int64_t idx) {
return instr->mutable_operand(idx);
}
inline const HloInstruction* HloOperand(const HloInstruction* instr,
int64_t idx) {
return instr->operand(idx);
}
inline std::string InstToString(const HloInstruction* inst) {
return inst->ToString(
HloPrintOptions().set_print_metadata(false).set_print_percent(false));
}
template <typename HloInstructionType, typename Impl>
class HloInstructionPattern;
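// The impl classes below each check a single property of an HloInstruction
// (name, opcode, operand count, shape, operands, ...). They are combined
// through AllOfPattern by the HloInstructionPattern declared above.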
class HloInstructionPatternBaseImpl {
public:
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst == nullptr) {
EXPLAIN << "HloInstruction* is null";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "an HloInstruction";
}
static constexpr bool kIsTrivialMatcher = true;
};
class HloInstructionPatternNameImpl {
public:
explicit HloInstructionPatternNameImpl(absl::string_view name)
: name_(name) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst->name() != name_) {
EXPLAIN << "HloInstruction not named \"" << name_ << "\"";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "named \"" << name_ << "\"";
}
private:
absl::string_view name_;
};
class HloInstructionIsImpl {
public:
explicit HloInstructionIsImpl(const HloInstruction* inst) : inst_(inst) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst != inst_) {
EXPLAIN << "HloInstruction " << std::hex << std::nouppercase
<< std::showbase << reinterpret_cast<uint64_t>(inst) << " is not "
<< reinterpret_cast<uint64_t>(inst_) << " ("
<< InstToString(inst_) << ")";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which is " << std::hex << std::nouppercase << std::showbase
<< reinterpret_cast<uint64_t>(inst_) << " (" << InstToString(inst_)
<< ")";
}
private:
const HloInstruction* inst_;
};
class HloInstructionPatternOpcodeImpl {
public:
explicit constexpr HloInstructionPatternOpcodeImpl(HloOpcode opcode,
bool invert)
: opcode_(opcode), invert_(invert) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (invert_ && inst->opcode() == opcode_) {
EXPLAIN << "HloInstruction has opcode " << opcode_
<< ", expected anything else";
return false;
}
if (!invert_ && inst->opcode() != opcode_) {
EXPLAIN << "HloInstruction doesn't have opcode " << opcode_;
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
if (!invert_) {
*os << "with opcode " << opcode_;
} else {
*os << "with any opcode other than " << opcode_;
}
}
private:
HloOpcode opcode_;
bool invert_;
};
class HloInstructionCustomCallTargetImpl {
public:
explicit HloInstructionCustomCallTargetImpl(
absl::Span<const absl::string_view> custom_call_targets)
: custom_call_targets_(custom_call_targets.begin(),
custom_call_targets.end()) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kCustomCall ||
!absl::c_linear_search(custom_call_targets_,
inst->custom_call_target())) {
if (custom_call_targets_.size() == 1) {
EXPLAIN << "HloInstruction is not a custom call with a target '"
<< custom_call_targets_.front() << "'";
} else {
EXPLAIN << "HloInstruction is not a custom call with a target in {"
<< absl::StrJoin(custom_call_targets_, ", ") << "}";
}
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
if (custom_call_targets_.size() == 1) {
*os << "custom call with target '" << custom_call_targets_.front() << "'";
} else {
*os << "custom call with target in {"
<< absl::StrJoin(custom_call_targets_, ", ") << "}";
}
}
private:
absl::InlinedVector<std::string, 1> custom_call_targets_;
};
class HloInstructionPatternNumOperandsImpl {
public:
explicit constexpr HloInstructionPatternNumOperandsImpl(int64_t num_operands)
: num_operands_(num_operands) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst->operand_count() != num_operands_) {
EXPLAIN << "HloInstruction doesn't have " << num_operands_ << " operands";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with " << num_operands_ << " operand"
<< (num_operands_ != 1 ? "s" : "");
}
private:
int64_t num_operands_;
};
template <typename ShapeType, typename ShapeImpl>
class HloInstructionPatternShapeImpl {
public:
explicit constexpr HloInstructionPatternShapeImpl(
const ShapePattern<ShapeType, ShapeImpl>& shape)
: shape_(shape) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (!shape_.Match(&inst->shape(), option)) {
EXPLAIN << "\nin output shape";
return false;
}
return true;
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
if (!shape_.Match(inst->mutable_shape(), option)) {
EXPLAIN << "\nin output shape";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "outputting";
Indent(os, indent + kIndentInc);
shape_.DescribeTo(os, indent + kIndentInc);
}
private:
ShapePattern<ShapeType, ShapeImpl> shape_;
};
template <typename OperandType, typename OperandImpl>
class HloInstructionPatternOperandImpl {
public:
explicit constexpr HloInstructionPatternOperandImpl(
int64_t operand_index,
const HloInstructionPattern<OperandType, OperandImpl>& operand)
: operand_index_(operand_index), operand_(operand) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with operand " << operand_index_ << " which is:";
Indent(os, indent + kIndentInc);
operand_.DescribeTo(os, indent + kIndentInc);
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (operand_index_ >= inst->operand_count()) {
EXPLAIN << "desired operand index " << operand_index_
<< " is out of bounds";
return false;
}
if (!operand_.Match(HloOperand(inst, operand_index_), option)) {
EXPLAIN << "\nin operand " << operand_index_;
return false;
}
if (option.single_user_only &&
inst->operand(operand_index_)->user_count() != 1) {
EXPLAIN << "Operand " << operand_index_ << " of HloInstruction has "
<< inst->operand(operand_index_)->user_count()
<< " users. Expected 1.";
return false;
}
return true;
}
int64_t operand_index_;
HloInstructionPattern<OperandType, OperandImpl> operand_;
};
template <typename OperandType, typename OperandImpl>
class HloInstructionPatternOperandIfPresentImpl {
public:
explicit constexpr HloInstructionPatternOperandIfPresentImpl(
int64_t operand_index,
const HloInstructionPattern<OperandType, OperandImpl>& operand)
: operand_index_(operand_index), operand_(operand) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "either with fewer than " << operand_index_ + 1 << " operand"
<< (operand_index_ + 1 != 1 ? "s" : "") << ", or with an operand "
<< operand_index_ << " which is:";
Indent(os, indent + kIndentInc);
operand_.DescribeTo(os, indent + kIndentInc);
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (operand_index_ >= inst->operand_count()) {
return true;
}
if (!operand_.Match(HloOperand(inst, operand_index_), option)) {
EXPLAIN << "\nin operand " << operand_index_;
return false;
}
return true;
}
int64_t operand_index_;
HloInstructionPattern<OperandType, OperandImpl> operand_;
};
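// Matches a two-operand instruction whose operands match op1_ and op2_ in
// either order. Without an explanation stream it simply tries both
// assignments; with one, it evaluates all four matcher/operand combinations
// so it can report precisely which pairing failed.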
template <typename OperandType1, typename OperandImpl1, typename OperandType2,
typename OperandImpl2>
class HloInstructionPatternBinaryOperandsAnyOrderImpl {
public:
explicit constexpr HloInstructionPatternBinaryOperandsAnyOrderImpl(
const HloInstructionPattern<OperandType1, OperandImpl1>& op1,
const HloInstructionPattern<OperandType2, OperandImpl2>& op2)
: op1_(op1), op2_(op2) {}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with two operands in either order:";
Indent(os, indent);
*os << " - ";
op1_.DescribeTo(os, indent + 3);
Indent(os, indent);
*os << " - ";
op2_.DescribeTo(os, indent + 3);
}
private:
HloInstruction* operand(HloInstruction* inst, int64_t idx) const {
return inst->mutable_operand(idx);
}
const HloInstruction* operand(const HloInstruction* inst, int64_t idx) const {
return inst->operand(idx);
}
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->operand_count() != 2) {
EXPLAIN << "HloInstruction did not have two operands";
return false;
}
if (option.single_user_only) {
for (int i = 0; i < 2; ++i) {
if (inst->operand(i)->user_count() != 1) {
EXPLAIN << "Operand " << i << " of HloInstruction has "
<< inst->operand(i)->user_count() << " users. Expected 1.";
return false;
}
}
}
if (!option.explain_os) {
auto try_match = [&](int64_t idx1, int64_t idx2) {
MatchOption new_option = option;
new_option.capture = false;
if (op1_.Match(operand(inst, idx1), new_option) &&
op2_.Match(operand(inst, idx2), new_option)) {
if (option.capture) {
bool matched = op1_.Match(operand(inst, idx1), option) &&
op2_.Match(operand(inst, idx2), option);
DCHECK(matched);
}
return true;
}
return false;
};
return try_match(0, 1) || try_match(1, 0);
}
    // First index selects the matcher (op1_ or op2_), second the operand.
    bool matches[2][2];
    std::stringstream explanations[2][2];
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 2; ++j) {
MatchOption new_option = option;
new_option.capture = false;
new_option.explain_os = &explanations[i][j];
matches[i][j] = i == 0 ? op1_.Match(operand(inst, j), new_option)
: op2_.Match(operand(inst, j), new_option);
}
}
for (int i = 0; i < 2; ++i) {
if (matches[0][i] && matches[1][(i + 1) % 2]) {
if (option.capture) {
auto* operand1 = operand(inst, i);
auto* operand2 = operand(inst, (i + 1) % 2);
bool matched =
op1_.Match(operand1, option) && op2_.Match(operand2, option);
DCHECK(matched);
}
return true;
}
}
auto describe_matcher = [&](int matcher_idx) {
EXPLAIN << "\n - ";
if (matcher_idx == 0) {
op1_.DescribeTo(option.explain_os, 3);
} else {
CHECK_EQ(matcher_idx, 1);
op2_.DescribeTo(option.explain_os, 3);
}
for (int i = 0; i < 2; ++i) {
        if (matches[matcher_idx][i]) {
continue;
}
EXPLAIN << "\ndoes not match " << (i == 0 ? "LHS" : "RHS") << ":\n";
EXPLAIN << " - ";
EXPLAIN << absl::StrReplaceAll(
explanations[matcher_idx][ i].str(), {{"\n", "\n "}});
}
};
bool wrote_explanation = false;
for (int i = 0; !wrote_explanation && i < 2; ++i) {
if (!matches[i][0] && !matches[i][1]) {
EXPLAIN << "HloInstruction's operands (ignoring order) did not match "
<< (i == 0 ? "first" : "second") << " matcher. Specifically,";
describe_matcher(i);
wrote_explanation = true;
}
}
for (int i = 0; !wrote_explanation && i < 2; ++i) {
      if (matches[0][i] && matches[1][i]) {
CHECK(!matches[0][(i + 1) % 2]);
CHECK(!matches[1][(i + 1) % 2]);
CHECK(!wrote_explanation);
EXPLAIN << "HloInstruction's " << (i == 1 ? "LHS" : "RHS")
<< " operand did not match either of the two matchers. "
"Specifically,";
describe_matcher(0);
EXPLAIN << "\nand";
describe_matcher(1);
wrote_explanation = true;
}
}
CHECK(wrote_explanation);
return false;
}
HloInstructionPattern<OperandType1, OperandImpl1> op1_;
HloInstructionPattern<OperandType2, OperandImpl2> op2_;
};
class HloInstructionPatternFusionKindImpl {
public:
explicit constexpr HloInstructionPatternFusionKindImpl(
::xla::HloInstruction::FusionKind kind)
: kind_(kind) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with fusion kind " << ToString(kind_);
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kFusion) {
EXPLAIN << "HloInstruction does not have fusion kind " << ToString(kind_)
<< "; it's not a fusion";
return false;
}
if (inst->fusion_kind() != kind_) {
EXPLAIN << "HloInstruction does not have fusion kind " << ToString(kind_);
return false;
}
return true;
}
::xla::HloInstruction::FusionKind kind_;
};
class HloInstructionPatternTupleIndexImpl {
public:
explicit constexpr HloInstructionPatternTupleIndexImpl(int64_t tuple_index)
: tuple_index_(tuple_index) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which is a GTE with index " << tuple_index_;
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kGetTupleElement) {
EXPLAIN << "HloInstruction is not a GTE with index " << tuple_index_
<< "; it's not a GTE at all";
return false;
}
if (inst->tuple_index() != tuple_index_) {
EXPLAIN << "HloInstruction is not a GTE with index " << tuple_index_;
return false;
}
return true;
}
int64_t tuple_index_;
};
class HloInstructionPatternParameterNumImpl {
public:
explicit constexpr HloInstructionPatternParameterNumImpl(
int64_t parameter_num)
: parameter_num_(parameter_num) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which is parameter " << parameter_num_;
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kParameter ||
inst->parameter_number() != parameter_num_) {
EXPLAIN << "HloInstruction is not parameter " << parameter_num_;
return false;
}
return true;
}
int64_t parameter_num_;
};
class HloInstructionPatternOneUseOrUserImpl {
protected:
bool MatchOneUser(const HloInstruction* inst, MatchOption option) const {
if (inst->user_count() != 1) {
EXPLAIN << "HloInstruction has " << inst->user_count()
<< " users, but expected exactly one.";
if (inst->user_count() > 1) {
EXPLAIN << "\nAll users:";
for (const HloInstruction* user : inst->users()) {
EXPLAIN << "\n - " << InstToString(user);
}
}
return false;
}
return true;
}
};
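// Matches an instruction that is used exactly once: it has a single user,
// and it appears exactly once in that user's operand list.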
class HloInstructionPatternOneUseImpl
: public HloInstructionPatternOneUseOrUserImpl {
public:
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (!MatchOneUser(inst, option)) {
return false;
}
int64_t use_count = absl::c_count_if(
inst->users()[0]->operands(),
[&](const HloInstruction* operand) { return operand == inst; });
if (use_count != 1) {
EXPLAIN << "HloInstruction is used " << use_count
<< " times by its user, but is expected to be used just once: "
<< InstToString(inst->users()[0]);
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which has exactly one use";
}
};
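// Matches an instruction with exactly one user, even if that user references
// the instruction through several of its operands.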
class HloInstructionPatternOneUserImpl
: public HloInstructionPatternOneUseOrUserImpl {
public:
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchOneUser(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which has exactly one user (but possibly is used multiple times by "
"that instruction)";
}
};
class HloInstructionPatternNumUserImpl {
public:
explicit constexpr HloInstructionPatternNumUserImpl(int64_t user_num)
: user_num_(user_num) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst->user_count() != user_num_) {
EXPLAIN << "HloInstruction has " << inst->user_count()
<< " users, but expected exactly " << user_num_ << " users.";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which has exactly " << user_num_
<< " users (but possibly is used multiple times by "
"same instruction)";
}
private:
int64_t user_num_;
};
class HloInstructionPatternAtMostNumUserImpl {
public:
explicit constexpr HloInstructionPatternAtMostNumUserImpl(int64_t user_num)
: user_num_(user_num) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst->user_count() > user_num_) {
EXPLAIN << "HloInstruction has " << inst->user_count()
<< " users, but expected less than or equal " << user_num_
<< " users.";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which has less than or equal " << user_num_
<< " users (but possibly is used multiple times by "
"same instruction)";
}
private:
int64_t user_num_;
};
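// Matches a kCompare instruction with the given comparison direction.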
class HloInstructionPatternComparisonDirectionImpl {
public:
explicit constexpr HloInstructionPatternComparisonDirectionImpl(
ComparisonDirection direction)
: direction_(direction) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which has comparison direction "
<< ComparisonDirectionToString(direction_);
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kCompare ||
inst->comparison_direction() != direction_) {
EXPLAIN << "HloInstruction is not comparison "
<< ComparisonDirectionToString(direction_);
return false;
}
return true;
}
ComparisonDirection direction_;
};
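// Matches a convolution (or custom-call) instruction whose
// convolution_dimension_numbers equal the given dimension numbers.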
class HloInstructionPatternConvDnumsImpl {
public:
explicit HloInstructionPatternConvDnumsImpl(absl::string_view dnums)
: HloInstructionPatternConvDnumsImpl(
ParseConvolutionDimensionNumbers(dnums).value()) {}
explicit HloInstructionPatternConvDnumsImpl(ConvolutionDimensionNumbers dnums)
: dnums_(std::move(dnums)) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which has convolution dimension numbers "
<< ConvolutionDimensionNumbersToString(dnums_);
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kConvolution &&
inst->opcode() != HloOpcode::kCustomCall) {
EXPLAIN << "HloInstruction is not convolution or custom-call and so "
"can't have convolution_dimension_numbers";
return false;
}
const ConvolutionDimensionNumbers& actual_dnums =
inst->convolution_dimension_numbers();
if (!tsl::protobuf::util::MessageDifferencer::Equals(dnums_,
actual_dnums)) {
EXPLAIN << "convolution_dimension_numbers "
<< ConvolutionDimensionNumbersToString(actual_dnums)
<< " don't match expected "
<< ConvolutionDimensionNumbersToString(dnums_);
return false;
}
return true;
}
ConvolutionDimensionNumbers dnums_;
};
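// Matches an instruction for which the user-supplied predicate returns true.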
class HloInstructionPredicateImpl {
public:
explicit HloInstructionPredicateImpl(HloPredicate fn) : fn_(std::move(fn)) {}
bool Match(const HloInstruction* inst, MatchOption option) const {
bool match = fn_(inst);
if (!match) {
EXPLAIN << "HloInstruction does not match user-specified predicate";
}
return match;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which matches a user-specified predicate";
}
private:
HloPredicate fn_;
};
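// Matches a dot instruction whose LHS and RHS contracting dimensions equal
// the given dimension lists.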
class HloInstructionContractingDimsImpl {
public:
explicit HloInstructionContractingDimsImpl(
absl::Span<const int64_t> lhs_contracting_dims,
absl::Span<const int64_t> rhs_contracting_dims)
: lhs_contracting_dims_(lhs_contracting_dims.begin(),
lhs_contracting_dims.end()),
rhs_contracting_dims_(rhs_contracting_dims.begin(),
rhs_contracting_dims.end()) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with lhs_contracting_dims {"
<< absl::StrJoin(lhs_contracting_dims_, ",")
<< "} and rhs_contracting_dims {"
<< absl::StrJoin(rhs_contracting_dims_, ",") << "}";
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kDot) {
EXPLAIN << "HloInstruction is not dot so "
"can't have dot_dimension_numbers";
return false;
}
const DotDimensionNumbers& dnums = inst->dot_dimension_numbers();
if (absl::MakeSpan(dnums.lhs_contracting_dimensions()) !=
lhs_contracting_dims_) {
EXPLAIN << "lhs_contracting_dimensions {"
<< absl::StrJoin(dnums.lhs_contracting_dimensions(), ",")
<< "} don't match expected {"
<< absl::StrJoin(lhs_contracting_dims_, ",") << "}";
return false;
}
if (absl::MakeSpan(dnums.rhs_contracting_dimensions()) !=
rhs_contracting_dims_) {
EXPLAIN << "rhs_contracting_dimensions {"
<< absl::StrJoin(dnums.rhs_contracting_dimensions(), ",")
<< "} don't match expected {"
<< absl::StrJoin(rhs_contracting_dims_, ",") << "}";
return false;
}
return true;
}
absl::InlinedVector<int64_t, 8> lhs_contracting_dims_;
absl::InlinedVector<int64_t, 8> rhs_contracting_dims_;
};
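// Matches a collective instruction whose replica groups equal the given
// groups, element for element.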
class HloInstructionReplicaGroupsImpl {
public:
explicit HloInstructionReplicaGroupsImpl(
std::vector<std::vector<int64_t>> replica_groups)
: replica_groups_(std::move(replica_groups)) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
std::vector<std::string> replica_group_strs;
replica_group_strs.reserve(replica_groups_.size());
for (const std::vector<int64_t>& replica_group : replica_groups_) {
replica_group_strs.push_back(
absl::StrCat("{", absl::StrJoin(replica_group, ","), "}"));
}
*os << "with replica_group {" << absl::StrJoin(replica_group_strs, ",")
<< "}";
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
const HloCollectiveInstruction* collective =
DynCast<HloCollectiveInstruction>(inst);
if (!collective) {
EXPLAIN << "HloInstruction is not a collective";
return false;
}
if (absl::c_equal(collective->replica_groups(), replica_groups_,
[](const ReplicaGroup& a, const std::vector<int64_t>& b) {
return absl::c_equal(a.replica_ids(), b);
})) {
return true;
}
std::ostringstream desc_stream;
DescribeTo(&desc_stream);
std::vector<std::string> replica_group_strs;
replica_group_strs.reserve(replica_groups_.size());
for (const ReplicaGroup& replica_group : collective->replica_groups()) {
replica_group_strs.push_back(absl::StrCat(
"{", absl::StrJoin(replica_group.replica_ids(), ","), "}"));
}
EXPLAIN << "replica_group {" << absl::StrJoin(replica_group_strs, ",")
<< "} don't match expected " << desc_stream.str();
return false;
}
std::vector<std::vector<int64_t>> replica_groups_;
};
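// Matches an instruction's sharding: either a specific sharding, or (when
// the optional is empty) the absence of any sharding.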
class HloInstructionShardingImpl {
public:
explicit HloInstructionShardingImpl(
const std::optional<HloSharding>& sharding)
: sharding_(sharding) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
if (sharding_.has_value()) {
*os << "with sharding " << sharding_->ToString();
} else {
*os << "with no sharding";
}
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (!sharding_.has_value()) {
if (!inst->has_sharding()) {
return true;
}
EXPLAIN << "HloInstruction is expected to have no sharding.";
return false;
}
if (inst->has_sharding()) {
if (inst->sharding() == sharding_.value()) {
return true;
}
EXPLAIN << "sharding " << inst->sharding().ToString()
<< " don't match expected " << sharding_->ToString();
return false;
} else {
EXPLAIN << "HloInstruction has no sharding. Expected: "
<< sharding_->ToString();
return false;
}
}
std::optional<HloSharding> sharding_;
};
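// Matches an instruction whose control predecessors and successors equal the
// given instruction lists, in order.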
class HloInstructionControlDepsImpl {
public:
explicit HloInstructionControlDepsImpl(
absl::Span<HloInstruction* const> preds,
absl::Span<HloInstruction* const> succs)
: preds_(preds), succs_(succs) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
auto print_deps = [os](absl::Span<HloInstruction* const> deps,
absl::string_view type) {
if (deps.empty()) {
*os << "no control " << type;
} else {
*os << "control " << type << " {" << absl::StrJoin(deps, ",", fmt)
<< "}";
}
};
*os << "with ";
print_deps(preds_, "predecessors");
*os << " and ";
print_deps(succs_, "successors");
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
auto match_deps = [&](absl::Span<HloInstruction* const> expected_deps,
const PtrVec<HloInstruction*>& actual_deps,
absl::string_view type) {
if (!absl::c_equal(expected_deps, actual_deps)) {
EXPLAIN << "HloInstruction expected to have control " << type << " {"
<< absl::StrJoin(expected_deps, ",", fmt) << "} but has {"
<< absl::StrJoin(actual_deps, ",", fmt) << "}";
return false;
}
return true;
};
return match_deps(preds_, inst->control_predecessors(), "predecessors") &&
match_deps(succs_, inst->control_successors(), "successors");
}
static void fmt(std::string* out, const HloInstruction* inst) {
absl::StrAppend(out, inst->name());
};
absl::Span<HloInstruction* const> preds_, succs_;
};
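// Matches a constant that is a scalar (or effective scalar), optionally with
// a specific value.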
template <typename ScalarTy>
class HloConstantScalarImpl {
public:
explicit constexpr HloConstantScalarImpl(bool match_effective_scalar)
: val_(std::nullopt), match_effective_scalar_(match_effective_scalar) {}
constexpr HloConstantScalarImpl(ScalarTy val, bool match_effective_scalar)
: val_(val), match_effective_scalar_(match_effective_scalar) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which is a constant "
<< (match_effective_scalar_ ? "effective " : "") << "scalar";
if (val_.has_value()) {
*os << " with value " << *val_;
}
}
private:
template <typename InstTy>
bool MatchImpl(InstTy* inst, MatchOption option) const {
const auto* const_inst = DynCast<HloConstantInstruction>(inst);
if (!const_inst) {
EXPLAIN << "HloInstruction is not a constant";
return false;
}
if (match_effective_scalar_ &&
!ShapeUtil::IsEffectiveScalar(inst->shape())) {
EXPLAIN << "HloInstruction is not an effective scalar";
return false;
}
if (!match_effective_scalar_ && !ShapeUtil::IsScalar(inst->shape())) {
EXPLAIN << "HloInstruction is not a scalar";
return false;
}
if (!val_.has_value()) {
return true;
}
auto const_inst_scalar_or = const_inst->literal().Reshape({});
if (!const_inst_scalar_or.ok()) {
EXPLAIN << "could not convert matched literal to effective scalar";
return false;
}
Literal const_inst_scalar = std::move(const_inst_scalar_or).value();
if (!const_inst_scalar.IsEqualAt({}, *val_)) {
EXPLAIN << "HloInstruction's constant value "
<< const_inst_scalar.ToStringWithoutShape()
<< " did not match expected value " << *val_;
return false;
}
return true;
}
std::optional<ScalarTy> val_;
bool match_effective_scalar_;
};
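// A pattern over HloInstructions.  Each With*/Is* method appends a further
// constraint (by AllOf-ing it with the current impl) and returns a new
// pattern; when a Match succeeds with capture enabled, the matched
// instruction is stored into matched_inst_ if one was provided.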
template <typename HloInstructionType, typename Impl>
class HloInstructionPattern {
private:
template <typename NewImpl>
auto AppendImpl(NewImpl new_impl) const {
auto new_allof = AllOf<::xla::HloInstruction>(impl_, std::move(new_impl));
return HloInstructionPattern<HloInstructionType, decltype(new_allof)>(
std::move(new_allof), matched_inst_);
}
public:
explicit constexpr HloInstructionPattern(const Impl& impl,
HloInstructionType** matched_inst)
: impl_(impl), matched_inst_(matched_inst) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (impl_.Match(inst, option)) {
if (option.capture && matched_inst_) {
*matched_inst_ = inst;
}
return true;
}
if (inst != nullptr) {
EXPLAIN << "\nin " << InstToString(inst);
}
return false;
}
bool Match(::xla::HloInstruction* inst, MatchOption option,
bool explain_instruction = true) const {
if (impl_.Match(inst, option)) {
if (option.capture && matched_inst_) {
*matched_inst_ = inst;
}
return true;
}
if (explain_instruction) {
EXPLAIN << "\nin " << InstToString(inst);
}
return false;
}
auto WithName(absl::string_view name) const {
return AppendImpl(HloInstructionPatternNameImpl(name));
}
auto WithOpcode(HloOpcode opcode) const {
return AppendImpl(HloInstructionPatternOpcodeImpl(opcode, false));
}
auto WithCustomCallTarget(
absl::Span<const absl::string_view> custom_call_targets) const {
return AppendImpl(HloInstructionCustomCallTargetImpl(custom_call_targets));
}
auto WithNumOperands(int64_t num_operands) const {
return AppendImpl(HloInstructionPatternNumOperandsImpl(num_operands));
}
auto WithoutOpcode(HloOpcode opcode) const {
return AppendImpl(HloInstructionPatternOpcodeImpl(opcode, true));
}
constexpr auto Is(const HloInstruction* instr) const {
return AppendImpl(HloInstructionIsImpl(instr));
}
constexpr auto IsConstant() const { return WithOpcode(HloOpcode::kConstant); }
constexpr auto IsConstantScalar() const {
return AppendImpl(
        HloConstantScalarImpl<int>(false));
}
template <typename ScalarTy>
constexpr auto IsConstantScalar(const ScalarTy& val) const {
return AppendImpl(
HloConstantScalarImpl<ScalarTy>(val, false));
}
constexpr auto IsConstantEffectiveScalar() const {
return AppendImpl(
        HloConstantScalarImpl<int>(true));
}
template <typename ScalarTy>
constexpr auto IsConstantEffectiveScalar(const ScalarTy& val) const {
return AppendImpl(
HloConstantScalarImpl<ScalarTy>(val, true));
}
constexpr auto IsNonConstant() const {
return WithoutOpcode(HloOpcode::kConstant);
}
template <typename ShapeType, typename ShapeImpl>
constexpr auto WithShape(
const ShapePattern<ShapeType, ShapeImpl>& shape) const {
return AppendImpl(
HloInstructionPatternShapeImpl<ShapeType, ShapeImpl>(shape));
}
constexpr auto WithShape(PrimitiveType ty, absl::Span<const int64_t> dims) {
return WithShape(Shape().WithElementType(ty).WithDims(dims));
}
constexpr auto WithShape(PrimitiveType ty, absl::Span<const int64_t> dims,
absl::Span<const int64_t> minor_to_major) {
return WithShape(
Shape().WithElementType(ty).WithDims(dims).WithLayout(minor_to_major));
}
template <typename Dummy = void>
constexpr auto WithShapeEqualTo(const ::xla::Shape* shape) const {
return WithShape(Shape().EqualTo(shape));
}
template <typename Dummy = void>
constexpr auto WithShapeCompatibleTo(const ::xla::Shape* shape) const {
return WithShape(Shape().CompatibleTo(shape));
}
constexpr auto WithElementType(PrimitiveType ty) {
return WithShape(Shape().WithElementType(ty));
}
template <typename OperandType, typename OperandImpl>
constexpr auto WithOperand(
int64_t operand_index,
const HloInstructionPattern<OperandType, OperandImpl>& operand) const {
return AppendImpl(
HloInstructionPatternOperandImpl<OperandType, OperandImpl>(
operand_index, operand));
}
template <typename OperandType, typename OperandImpl>
constexpr auto WithOperandIfPresent(
int64_t operand_index,
const HloInstructionPattern<OperandType, OperandImpl>& operand) const {
return AppendImpl(
HloInstructionPatternOperandIfPresentImpl<OperandType, OperandImpl>(
operand_index, operand));
}
template <typename OperandType1, typename OperandImpl1, typename OperandType2,
typename OperandImpl2>
constexpr auto WithBinaryOperandsAnyOrder(
const HloInstructionPattern<OperandType1, OperandImpl1>& op1,
const HloInstructionPattern<OperandType2, OperandImpl2>& op2) const {
return AppendImpl(
HloInstructionPatternBinaryOperandsAnyOrderImpl<
OperandType1, OperandImpl1, OperandType2, OperandImpl2>(op1, op2));
}
constexpr auto WithFusionKind(HloInstruction::FusionKind kind) const {
return AppendImpl(HloInstructionPatternFusionKindImpl(kind));
}
constexpr auto WithTupleIndex(int64_t tuple_index) const {
return AppendImpl(HloInstructionPatternTupleIndexImpl(tuple_index));
}
constexpr auto WithParameterNum(int64_t parameter_num) const {
return AppendImpl(HloInstructionPatternParameterNumImpl(parameter_num));
}
constexpr auto WithOneUse() const {
return AppendImpl(HloInstructionPatternOneUseImpl());
}
constexpr auto WithOneUser() const {
return AppendImpl(HloInstructionPatternOneUserImpl());
}
constexpr auto WithNumUser(int64_t user_num) const {
return AppendImpl(HloInstructionPatternNumUserImpl(user_num));
}
constexpr auto WithAtMostNumUser(int64_t user_num) const {
return AppendImpl(HloInstructionPatternAtMostNumUserImpl(user_num));
}
auto WithComparisonDirection(ComparisonDirection direction) const {
return AppendImpl(HloInstructionPatternComparisonDirectionImpl(direction));
}
auto WithConvDnums(absl::string_view dnums) const {
return AppendImpl(HloInstructionPatternConvDnumsImpl(dnums));
}
auto WithConvDnums(ConvolutionDimensionNumbers dnums) const {
return AppendImpl(HloInstructionPatternConvDnumsImpl(dnums));
}
auto WithPredicate(HloPredicate fn) const {
return AppendImpl(HloInstructionPredicateImpl(std::move(fn)));
}
auto WithContractingDims(
absl::Span<const int64_t> lhs_contracting_dims,
absl::Span<const int64_t> rhs_contracting_dims) const {
return AppendImpl(HloInstructionContractingDimsImpl(lhs_contracting_dims,
rhs_contracting_dims));
}
auto WithReplicaGroups(
std::vector<std::vector<int64_t>> replica_groups) const {
return AppendImpl(
HloInstructionReplicaGroupsImpl(std::move(replica_groups)));
}
auto WithSharding(absl::string_view sharding) const {
return AppendImpl(
HloInstructionShardingImpl(ParseSharding(sharding).value()));
}
auto WithControlDeps(absl::Span<HloInstruction* const> preds,
absl::Span<HloInstruction* const> succs) {
return AppendImpl(HloInstructionControlDepsImpl(preds, succs));
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
impl_.DescribeTo(os, indent);
}
private:
Impl impl_;
HloInstructionType** matched_inst_;
};
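// Builds an AnyOf pattern.  The HloInstruction specialization wraps the
// result in an HloInstructionPattern so further With* modifiers can be
// chained onto it.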
template <typename Item, typename... Patterns>
struct AnyOfImpl {
auto operator()(const Patterns&... patterns) const {
return AnyOfPattern<typename std::remove_const<Item>::type, Patterns...>(
patterns...);
}
};
template <typename... Patterns>
struct AnyOfImpl<HloInstruction, Patterns...> {
auto operator()(const Patterns&... patterns) const {
auto any_of = AnyOfPattern<HloInstruction, Patterns...>(patterns...);
return HloInstructionPattern<HloInstruction, decltype(any_of)>(
std::move(any_of), nullptr);
}
};
}  // namespace detail
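// AnyOf<HloInstruction>(p1, p2, ...) matches if any of the given patterns
// matches; patterns are tried in order and evaluation stops at the first
// match (see the AnyOfShortCircuit test).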
template <typename Item, typename... Patterns>
auto AnyOf(const Patterns&... patterns) {
return detail::AnyOfImpl<Item, Patterns...>()(patterns...);
}
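// Matches any HloInstruction, optionally capturing it into matched_inst.
// This is the usual starting point for building patterns, e.g.
//   Match(instr, match::Op(&captured).WithOpcode(HloOpcode::kAdd)).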
inline constexpr auto Op(const ::xla::HloInstruction** matched_inst = nullptr) {
return detail::HloInstructionPattern<const ::xla::HloInstruction,
detail::HloInstructionPatternBaseImpl>(
detail::HloInstructionPatternBaseImpl(), matched_inst);
}
inline constexpr auto Op(::xla::HloInstruction** matched_inst) {
return detail::HloInstructionPattern<::xla::HloInstruction,
detail::HloInstructionPatternBaseImpl>(
detail::HloInstructionPatternBaseImpl(), matched_inst);
}
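// Defines nullary matchers: NAME() matches HloOpcode::kNAME, and
// NAME(&instr) additionally captures the matched instruction.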
#define XLA_NULLOP_PATTERN(NAME) \
inline auto NAME() { return Op().WithOpcode(HloOpcode::k##NAME); } \
\
template <typename HloInstructionType> \
inline auto NAME(HloInstructionType** matched_inst) { \
return Op(matched_inst).WithOpcode(HloOpcode::k##NAME); \
}
XLA_NULLOP_PATTERN(Constant)
XLA_NULLOP_PATTERN(Parameter)
XLA_NULLOP_PATTERN(Iota)
XLA_NULLOP_PATTERN(Rng)
XLA_NULLOP_PATTERN(PartitionId)
XLA_NULLOP_PATTERN(ReplicaId)
#undef XLA_NULLOP_PATTERN
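// Defines unary matchers: overloads optionally capture the instruction
// and/or constrain operand 0.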
#define XLA_UNOP_PATTERN(NAME) \
inline auto NAME() { return Op().WithOpcode(HloOpcode::k##NAME); } \
\
template <typename HloInstructionType> \
inline auto NAME(HloInstructionType** matched_inst) { \
return Op(matched_inst).WithOpcode(HloOpcode::k##NAME); \
} \
\
template <typename Arg> \
inline auto NAME(Arg&& arg) { \
return Op() \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg>(arg)); \
} \
\
template <typename HloInstructionType, typename Arg> \
inline auto NAME(HloInstructionType** matched_inst, Arg&& arg) { \
return Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg>(arg)); \
}
XLA_UNOP_PATTERN(Abs)
XLA_UNOP_PATTERN(RoundNearestAfz)
XLA_UNOP_PATTERN(Bitcast)
XLA_UNOP_PATTERN(BitcastConvert)
XLA_UNOP_PATTERN(Broadcast)
XLA_UNOP_PATTERN(Cbrt)
XLA_UNOP_PATTERN(Ceil)
XLA_UNOP_PATTERN(Convert)
XLA_UNOP_PATTERN(Copy)
XLA_UNOP_PATTERN(Cos)
XLA_UNOP_PATTERN(AllReduceStart)
XLA_UNOP_PATTERN(AllReduceDone)
XLA_UNOP_PATTERN(AllToAll)
XLA_UNOP_PATTERN(AsyncDone)
XLA_UNOP_PATTERN(CollectiveBroadcast)
XLA_UNOP_PATTERN(CollectivePermute)
XLA_UNOP_PATTERN(CollectivePermuteStart)
XLA_UNOP_PATTERN(CollectivePermuteDone)
XLA_UNOP_PATTERN(Domain)
XLA_UNOP_PATTERN(Erf)
XLA_UNOP_PATTERN(Exp)
XLA_UNOP_PATTERN(Expm1)
XLA_UNOP_PATTERN(Fft)
XLA_UNOP_PATTERN(Floor)
XLA_UNOP_PATTERN(GetTupleElement)
XLA_UNOP_PATTERN(Imag)
XLA_UNOP_PATTERN(Infeed)
XLA_UNOP_PATTERN(IsFinite)
XLA_UNOP_PATTERN(Log)
XLA_UNOP_PATTERN(Logistic)
XLA_UNOP_PATTERN(Not)
XLA_UNOP_PATTERN(Negate)
XLA_UNOP_PATTERN(OptimizationBarrier)
XLA_UNOP_PATTERN(Real)
XLA_UNOP_PATTERN(Recv)
XLA_UNOP_PATTERN(RecvDone)
XLA_UNOP_PATTERN(ReducePrecision)
XLA_UNOP_PATTERN(Reshape)
XLA_UNOP_PATTERN(Reverse)
XLA_UNOP_PATTERN(Rsqrt)
XLA_UNOP_PATTERN(SendDone)
XLA_UNOP_PATTERN(Sign)
XLA_UNOP_PATTERN(Sin)
XLA_UNOP_PATTERN(Slice)
XLA_UNOP_PATTERN(Sqrt)
XLA_UNOP_PATTERN(Tan)
XLA_UNOP_PATTERN(Tanh)
XLA_UNOP_PATTERN(Transpose)
XLA_UNOP_PATTERN(While)
#undef XLA_UNOP_PATTERN
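// Defines binary matchers constraining operands 0 and 1; the commutative
// variant also defines NAMEAnyOrder, which accepts the two operand patterns
// in either order.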
#define XLA_BINOP_PATTERN(NAME) \
inline auto NAME() { return Op().WithOpcode(HloOpcode::k##NAME); } \
\
template <typename Lhs, typename Rhs> \
inline auto NAME(Lhs&& lhs, Rhs&& rhs) { \
return Op() \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Lhs>(lhs)) \
.WithOperand(1, std::forward<Rhs>(rhs)); \
} \
\
template <typename HloInstructionType, typename Lhs, typename Rhs> \
inline auto NAME(HloInstructionType** matched_inst, Lhs&& lhs, Rhs&& rhs) { \
return Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Lhs>(lhs)) \
.WithOperand(1, std::forward<Rhs>(rhs)); \
}
#define XLA_COMMUTATIVE_BINOP_PATTERN(NAME) \
XLA_BINOP_PATTERN(NAME) \
\
template <typename HloInstructionType, typename Lhs, typename Rhs> \
inline auto NAME##AnyOrder(HloInstructionType** matched_inst, Lhs&& lhs, \
Rhs&& rhs) { \
return Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithBinaryOperandsAnyOrder(std::forward<Lhs>(lhs), \
std::forward<Rhs>(rhs)); \
} \
template <typename Lhs, typename Rhs> \
inline auto NAME##AnyOrder(Lhs&& lhs, Rhs&& rhs) { \
return NAME##AnyOrder<const HloInstruction>( \
nullptr, std::forward<Lhs>(lhs), std::forward<Rhs>(rhs)); \
}
XLA_COMMUTATIVE_BINOP_PATTERN(Add)
XLA_BINOP_PATTERN(Atan2)
XLA_BINOP_PATTERN(Divide)
XLA_BINOP_PATTERN(Complex)
XLA_BINOP_PATTERN(Compare)
XLA_BINOP_PATTERN(Convolution)
XLA_BINOP_PATTERN(Dot)
XLA_BINOP_PATTERN(Gather)
XLA_COMMUTATIVE_BINOP_PATTERN(Maximum)
XLA_COMMUTATIVE_BINOP_PATTERN(Minimum)
XLA_COMMUTATIVE_BINOP_PATTERN(Multiply)
XLA_BINOP_PATTERN(Outfeed)
XLA_BINOP_PATTERN(Pad)
XLA_BINOP_PATTERN(Power)
XLA_BINOP_PATTERN(Remainder)
XLA_BINOP_PATTERN(Send)
XLA_BINOP_PATTERN(Subtract)
XLA_COMMUTATIVE_BINOP_PATTERN(And)
XLA_COMMUTATIVE_BINOP_PATTERN(Or)
XLA_BINOP_PATTERN(ShiftLeft)
XLA_BINOP_PATTERN(ShiftRightArithmetic)
XLA_BINOP_PATTERN(ShiftRightLogical)
XLA_COMMUTATIVE_BINOP_PATTERN(Xor)
#undef XLA_COMMUTATIVE_BINOP_PATTERN
#undef XLA_BINOP_PATTERN
#define XLA_TERNOP_PATTERN(NAME) \
inline auto NAME() { return Op().WithOpcode(HloOpcode::k##NAME); } \
\
template <typename Arg0, typename Arg1, typename Arg2> \
inline auto NAME(Arg0&& arg0, Arg1&& arg1, Arg2&& arg2) { \
return Op() \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg0>(arg0)) \
.WithOperand(1, std::forward<Arg1>(arg1)) \
.WithOperand(2, std::forward<Arg2>(arg2)); \
} \
\
template <typename HloInstructionType, typename Arg0, typename Arg1, \
typename Arg2> \
inline auto NAME(HloInstructionType** matched_inst, Arg0&& arg0, \
Arg1&& arg1, Arg2&& arg2) { \
return Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg0>(arg0)) \
.WithOperand(1, std::forward<Arg1>(arg1)) \
.WithOperand(2, std::forward<Arg2>(arg2)); \
}
XLA_TERNOP_PATTERN(Clamp);
XLA_TERNOP_PATTERN(Select);
XLA_TERNOP_PATTERN(SelectAndScatter);
#undef XLA_TERNOP_PATTERN
namespace detail {
template <typename Matcher, typename FirstArg>
inline auto WithOperands(Matcher&& m, int64_t operand_num,
FirstArg&& first_arg) {
return m.WithOperand(operand_num, std::forward<FirstArg>(first_arg));
}
template <typename Matcher, typename FirstArg, typename... Args>
inline auto WithOperands(Matcher&& m, int64_t operand_num, FirstArg&& first_arg,
Args&&... args) {
return WithOperands(
m.WithOperand(operand_num, std::forward<FirstArg>(first_arg)),
operand_num + 1, std::forward<Args>(args)...);
}
}  // namespace detail
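// Defines variadic matchers; when operand patterns are supplied, the operand
// count is also constrained to match the number of patterns.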
#define XLA_VARIADIC_OP_PATTERN(NAME) \
inline auto NAME() { return Op().WithOpcode(HloOpcode::k##NAME); } \
\
template <typename... Args> \
inline auto NAME(Args&&... args) { \
return detail::WithOperands( \
Op().WithOpcode(HloOpcode::k##NAME).WithNumOperands(sizeof...(Args)), \
0, std::forward<Args>(args)...); \
} \
\
template <typename HloInstructionType, typename... Args> \
inline auto NAME(HloInstructionType** matched_inst, Args&&... args) { \
return detail::WithOperands(Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithNumOperands(sizeof...(Args)), \
0, \
std::forward<Args>(args)...); \
} \
\
template <typename HloInstructionType> \
inline auto NAME(HloInstructionType** matched_inst) { \
return Op(matched_inst).WithOpcode(HloOpcode::k##NAME); \
}
XLA_VARIADIC_OP_PATTERN(AfterAll);
XLA_VARIADIC_OP_PATTERN(AllGather)
XLA_VARIADIC_OP_PATTERN(AllReduce)
XLA_VARIADIC_OP_PATTERN(AsyncStart)
XLA_VARIADIC_OP_PATTERN(Concatenate);
XLA_VARIADIC_OP_PATTERN(Conditional);
XLA_VARIADIC_OP_PATTERN(DynamicSlice)
XLA_VARIADIC_OP_PATTERN(DynamicUpdateSlice)
XLA_VARIADIC_OP_PATTERN(Fusion);
XLA_VARIADIC_OP_PATTERN(Map)
XLA_VARIADIC_OP_PATTERN(Reduce);
XLA_VARIADIC_OP_PATTERN(ReduceScatter)
XLA_VARIADIC_OP_PATTERN(ReduceWindow)
XLA_VARIADIC_OP_PATTERN(Scatter);
XLA_VARIADIC_OP_PATTERN(Sort);
XLA_VARIADIC_OP_PATTERN(Tuple);
XLA_VARIADIC_OP_PATTERN(Call);
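// CustomCall matchers: overloads optionally capture the instruction,
// constrain the custom-call target(s), and/or constrain the operands.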
inline auto CustomCall() { return Op().WithOpcode(HloOpcode::kCustomCall); }
template <typename HloInstructionType>
auto CustomCall(HloInstructionType** matched_inst) {
return Op(matched_inst).WithOpcode(HloOpcode::kCustomCall);
}
template <
typename Arg0, typename... Args,
typename std::enable_if<
!std::is_convertible<Arg0, absl::string_view>::value &&
!std::is_convertible<Arg0, HloInstruction**>::value &&
!std::is_convertible<Arg0, const HloInstruction**>::value>::type* =
nullptr>
auto CustomCall(Arg0&& arg0, Args&&... args) {
return detail::WithOperands(CustomCall().WithNumOperands(sizeof...(Args) + 1),
0, std::forward<Arg0>(arg0),
std::forward<Args>(args)...);
}
template <typename... Args>
auto CustomCall(absl::Span<const absl::string_view> custom_call_targets,
Args&&... args) {
return CustomCall(std::forward<Args>(args)...)
.WithCustomCallTarget(custom_call_targets);
}
template <typename HloInstructionType, typename Arg0, typename... Args,
typename std::enable_if<!std::is_convertible<
Arg0, absl::string_view>::value>::type* = nullptr>
auto CustomCall(HloInstructionType** matched_inst, Arg0&& arg0,
Args&&... args) {
return detail::WithOperands(
CustomCall(matched_inst).WithNumOperands(sizeof...(Args) + 1),
0, std::forward<Arg0>(arg0), std::forward<Args>(args)...);
}
template <typename HloInstructionType, typename... Args>
auto CustomCall(HloInstructionType** matched_inst,
absl::Span<const absl::string_view> custom_call_targets,
Args&&... args) {
return CustomCall(matched_inst, std::forward<Args>(args)...)
.WithCustomCallTarget(custom_call_targets);
}
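// Comparison matchers: Eq/Ne/Ge/Gt/Le/Lt match kCompare with the given
// direction; Eq and Ne also get AnyOrder variants that accept their operands
// in either order.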
#define XLA_COMPARE_PATTERN(NAME) \
inline auto NAME() { \
return Op() \
.WithOpcode(HloOpcode::kCompare) \
.WithComparisonDirection(ComparisonDirection::k##NAME); \
} \
\
template <typename Lhs, typename Rhs> \
inline auto NAME(Lhs&& lhs, Rhs&& rhs) { \
return Op() \
.WithOpcode(HloOpcode::kCompare) \
.WithOperand(0, std::forward<Lhs>(lhs)) \
.WithOperand(1, std::forward<Rhs>(rhs)) \
.WithComparisonDirection(ComparisonDirection::k##NAME); \
} \
\
template <typename HloInstructionType, typename Lhs, typename Rhs> \
inline auto NAME(HloInstructionType** matched_inst, Lhs&& lhs, Rhs&& rhs) { \
return Op(matched_inst) \
.WithOpcode(HloOpcode::kCompare) \
.WithOperand(0, std::forward<Lhs>(lhs)) \
.WithOperand(1, std::forward<Rhs>(rhs)) \
.WithComparisonDirection(ComparisonDirection::k##NAME); \
}
#define XLA_COMMUTATIVE_COMPARE_PATTERN(NAME) \
XLA_COMPARE_PATTERN(NAME) \
\
template <typename HloInstructionType, typename Lhs, typename Rhs> \
inline auto NAME##AnyOrder(HloInstructionType** matched_inst, Lhs&& lhs, \
Rhs&& rhs) { \
return Op(matched_inst) \
.WithOpcode(HloOpcode::kCompare) \
.WithBinaryOperandsAnyOrder(std::forward<Lhs>(lhs), \
std::forward<Rhs>(rhs)); \
} \
template <typename Lhs, typename Rhs> \
inline auto NAME##AnyOrder(Lhs&& lhs, Rhs&& rhs) { \
return NAME##AnyOrder<const HloInstruction>( \
nullptr, std::forward<Lhs>(lhs), std::forward<Rhs>(rhs)); \
}
XLA_COMMUTATIVE_COMPARE_PATTERN(Eq);
XLA_COMMUTATIVE_COMPARE_PATTERN(Ne);
XLA_COMPARE_PATTERN(Ge);
XLA_COMPARE_PATTERN(Gt);
XLA_COMPARE_PATTERN(Le);
XLA_COMPARE_PATTERN(Lt);
inline auto NonConstant() { return Op().IsNonConstant(); }
template <typename HloInstructionType>
inline auto NonConstant(HloInstructionType** matched_inst) {
return Op(matched_inst).IsNonConstant();
}
template <typename Arg>
inline auto GetTupleElement(Arg&& arg, int64_t tuple_index) {
return Op()
.WithOpcode(HloOpcode::kGetTupleElement)
.WithOperand(0, std::forward<Arg>(arg))
.WithTupleIndex(tuple_index);
}
template <typename HloInstructionType, typename Arg>
inline auto GetTupleElement(HloInstructionType** matched_inst, Arg&& arg,
int64_t tuple_index) {
return Op(matched_inst)
.WithOpcode(HloOpcode::kGetTupleElement)
.WithOperand(0, std::forward<Arg>(arg))
.WithTupleIndex(tuple_index);
}
inline auto Parameter(int64_t parameter_num) {
return Op().WithOpcode(HloOpcode::kParameter).WithParameterNum(parameter_num);
}
template <typename HloInstructionType>
inline auto Parameter(HloInstructionType** matched_inst,
int64_t parameter_num) {
return Op(matched_inst)
.WithOpcode(HloOpcode::kParameter)
.WithParameterNum(parameter_num);
}
inline auto ConstantScalar() { return Op().IsConstantScalar(); }
template <typename HloInstructionType>
inline auto ConstantScalar(HloInstructionType** matched_inst) {
return Op(matched_inst).IsConstantScalar();
}
template <typename ScalarTy>
inline auto ConstantScalar(ScalarTy val) {
return Op().IsConstantScalar(val);
}
template <typename HloInstructionType, typename ScalarTy>
inline auto ConstantScalar(HloInstructionType** matched_inst, ScalarTy val) {
return Op(matched_inst).IsConstantScalar(val);
}
inline auto ConstantEffectiveScalar() {
return Op().IsConstantEffectiveScalar();
}
template <typename HloInstructionType>
inline auto ConstantEffectiveScalar(HloInstructionType** matched_inst) {
return Op(matched_inst).IsConstantEffectiveScalar();
}
template <typename ScalarTy>
inline auto ConstantEffectiveScalar(ScalarTy val) {
return Op().IsConstantEffectiveScalar(val);
}
template <typename HloInstructionType, typename ScalarTy>
inline auto ConstantEffectiveScalar(HloInstructionType** matched_inst,
ScalarTy val) {
return Op(matched_inst).IsConstantEffectiveScalar(val);
}
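// Type-erased instruction patterns, used by SharedSubpattern below so that a
// single pattern object can be stored behind a shared_ptr and reused.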
namespace detail {
class InstructionPatternInterface {
public:
virtual ~InstructionPatternInterface() = default;
virtual bool Match(::xla::HloInstruction* instr,
MatchOption option) const = 0;
virtual void DescribeTo(std::ostream* os, int64_t indent) const = 0;
};
template <typename Pattern>
class TypedInstructionPattern : public InstructionPatternInterface {
public:
explicit TypedInstructionPattern(Pattern pattern)
: pattern_(std::move(pattern)) {}
bool Match(::xla::HloInstruction* instr, MatchOption option) const override {
return pattern_.Match(instr, option);
}
void DescribeTo(std::ostream* os, int64_t indent) const override {
pattern_.DescribeTo(os, indent);
}
private:
Pattern pattern_;
};
class HloInstructionPatternSharedImpl {
public:
template <typename Pattern>
explicit HloInstructionPatternSharedImpl(Pattern pattern)
: pattern_(std::make_shared<TypedInstructionPattern<Pattern>>(
std::move(pattern))) {}
bool Match(::xla::HloInstruction* instr, MatchOption option) const {
return pattern_->Match(instr, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
pattern_->DescribeTo(os, indent);
}
private:
std::shared_ptr<InstructionPatternInterface> pattern_;
};
}  // namespace detail
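// Wraps a pattern in a shared, type-erased implementation so it can be
// reused from several larger patterns without copying its full type.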
template <typename HloInstructionType, typename OriginalImpl>
inline auto SharedSubpattern(
detail::HloInstructionPattern<HloInstructionType, OriginalImpl> pattern) {
auto impl = detail::HloInstructionPatternSharedImpl(std::move(pattern));
return detail::HloInstructionPattern<HloInstructionType, decltype(impl)>(
std::move(impl), nullptr);
}
}  // namespace match
}  // namespace xla
#undef EXPLAIN
#pragma pop_macro("EXPLAIN")
#endif | #include "xla/service/pattern_matcher.h"
#include <memory>
#include <sstream>
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = match;
using PatternMatcherTest = HloTestBase;
TEST_F(PatternMatcherTest, AddOp) {
constexpr char kModuleStr[] = R"(HloModule two_plus_two_module
ENTRY %two_plus_two_computation () -> f32[] {
%two = f32[] constant(2)
ROOT %two_plus_two = f32[] add(f32[] %two, f32[] %two)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
const HloInstruction* matched_inst;
HloInstruction* matched_operand;
Shape* matched_shape;
ASSERT_TRUE(Match(
hlo_module->entry_computation()->root_instruction(),
match::Op(&matched_inst)
.WithName("two_plus_two")
.WithOpcode(HloOpcode::kAdd)
.WithShape(match::Shape(&matched_shape).IsDenseArray())
.WithOperand(
0,
match::Op(&matched_operand).WithOpcode(HloOpcode::kConstant))));
ASSERT_NE(matched_inst, nullptr);
EXPECT_EQ(matched_inst->name(), "two_plus_two");
EXPECT_EQ(matched_inst->opcode(), HloOpcode::kAdd);
EXPECT_TRUE(Match(hlo_module->entry_computation()->root_instruction(),
match::Add(match::Constant(), match::Constant())));
EXPECT_FALSE(Match(hlo_module->entry_computation()->root_instruction(),
match::Op().WithName("bad_name")));
matched_inst = nullptr;
EXPECT_FALSE(Match(hlo_module->entry_computation()->root_instruction(),
match::Multiply(&matched_inst, match::Op(), match::Op())));
}
TEST_F(PatternMatcherTest, ScalarShape) {
auto scalar_shape = ShapeUtil::MakeShape(F32, {});
Shape* matched_shape;
EXPECT_TRUE(Match(&scalar_shape, match::Shape(&matched_shape).IsScalar()));
EXPECT_EQ(matched_shape, &scalar_shape);
EXPECT_TRUE(Match(&scalar_shape, match::Shape().IsArray()));
EXPECT_TRUE(Match(&scalar_shape, match::Shape().IsDenseArray()));
EXPECT_FALSE(Match(&scalar_shape, match::Shape().IsTuple()));
EXPECT_TRUE(Match(&scalar_shape, match::Shape().WithElementType(F32)));
EXPECT_TRUE(Match(&scalar_shape, match::Shape().WithRank(0)));
EXPECT_FALSE(Match(
&scalar_shape,
match::Shape().WithSubshape({0}, match::Shape()).WithElementType(F32)));
}
TEST_F(PatternMatcherTest, DenseArrayShape) {
auto array_shape = ShapeUtil::MakeShape(F32, {2, 3, 4});
Shape* matched_shape;
EXPECT_TRUE(Match(&array_shape, match::Shape(&matched_shape).IsArray()));
EXPECT_EQ(matched_shape, &array_shape);
EXPECT_TRUE(Match(&array_shape, match::Shape().IsDenseArray()));
EXPECT_FALSE(Match(&array_shape, match::Shape().IsScalar()));
EXPECT_FALSE(Match(&array_shape, match::Shape().IsTuple()));
EXPECT_TRUE(Match(&array_shape, match::Shape().WithElementType(F32)));
EXPECT_TRUE(Match(&array_shape, match::Shape().WithRank(3)));
EXPECT_FALSE(
Match(&array_shape, match::Shape().WithSubshape({0}, match::Shape())));
EXPECT_TRUE(Match(&array_shape, match::Shape().WithLayout({2, 1, 0})));
EXPECT_FALSE(Match(&array_shape, match::Shape().WithLayout({0, 1, 2})));
Layout* matched_layout;
EXPECT_TRUE(Match(&array_shape,
match::Shape().WithLayout(match::Layout(&matched_layout))));
EXPECT_EQ(matched_layout, &array_shape.layout());
EXPECT_TRUE(Match(&array_shape, match::Shape().IsDenseArray()));
}
TEST_F(PatternMatcherTest, DenseArrayShapeWithLayout) {
auto array_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 2, 3}, {1, 2, 0});
Shape* matched_shape;
EXPECT_TRUE(
Match(&array_shape, match::Shape(&matched_shape).WithLayout({1, 2, 0})));
EXPECT_EQ(matched_shape, &array_shape);
EXPECT_FALSE(Match(&array_shape, match::Shape().WithLayout({2, 0, 1})));
Layout* matched_layout;
EXPECT_TRUE(
Match(&array_shape,
match::Shape().WithLayout(
match::Layout(&matched_layout).WithMinorToMajor({1, 2, 0}))));
EXPECT_EQ(matched_layout, &array_shape.layout());
}
TEST_F(PatternMatcherTest, TupleShape) {
auto tuple_shape = ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {1, 2, 3}),
ShapeUtil::MakeShape(S32, {4, 5}),
});
EXPECT_TRUE(Match(&tuple_shape, match::Shape().IsTuple()));
EXPECT_FALSE(Match(&tuple_shape, match::Shape().IsArray()));
EXPECT_FALSE(Match(&tuple_shape, match::Shape().IsScalar()));
Shape* subshape;
ASSERT_TRUE(Match(
&tuple_shape,
match::Shape().WithSubshape(
{0}, match::Shape(&subshape).WithElementType(F32).WithRank(3))));
ASSERT_NE(subshape, nullptr);
EXPECT_TRUE(
ShapeUtil::Equal(*subshape, ShapeUtil::GetSubshape(tuple_shape, {0})));
EXPECT_TRUE(Match(&tuple_shape,
match::Shape().WithSubshape(
{0}, match::Shape().EqualTo(
&ShapeUtil::GetSubshape(tuple_shape, {0})))));
EXPECT_FALSE(Match(&tuple_shape,
match::Shape().WithSubshape(
{0}, match::Shape().EqualTo(
&ShapeUtil::GetSubshape(tuple_shape, {1})))));
ASSERT_TRUE(Match(
&tuple_shape,
match::Shape().WithSubshape(
{1}, match::Shape(&subshape).WithElementType(S32).WithRank(2))));
ASSERT_NE(subshape, nullptr);
EXPECT_TRUE(
ShapeUtil::Equal(*subshape, ShapeUtil::GetSubshape(tuple_shape, {1})));
EXPECT_TRUE(Match(&tuple_shape,
match::Shape().WithSubshape(
{1}, match::Shape().EqualTo(
&ShapeUtil::GetSubshape(tuple_shape, {1})))));
EXPECT_FALSE(Match(&tuple_shape,
match::Shape().WithSubshape(
{1}, match::Shape().EqualTo(
&ShapeUtil::GetSubshape(tuple_shape, {0})))));
EXPECT_FALSE(
Match(&tuple_shape, match::Shape().WithSubshape({2}, match::Shape())));
EXPECT_FALSE(
Match(&tuple_shape, match::Shape().WithSubshape({0, 0}, match::Shape())));
}
TEST_F(PatternMatcherTest, FusionKind) {
constexpr char kModuleStr[] = R"(
HloModule test_module
fused_computation {
ROOT fp0 = f32[] parameter(0)
}
ENTRY while.v11 {
p0 = f32[] parameter(0)
ROOT fusion = f32[] fusion(p0), kind=kLoop, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(
root, match::Op().WithFusionKind(HloInstruction::FusionKind::kLoop)));
EXPECT_FALSE(Match(
root, match::Op().WithFusionKind(HloInstruction::FusionKind::kInput)));
EXPECT_FALSE(Match(root->operand(0), match::Op().WithFusionKind(
HloInstruction::FusionKind::kLoop)));
}
TEST_F(PatternMatcherTest, GetTupleElement) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY while.v11 {
p0 = (f32[], f32[], f32[]) parameter(0)
ROOT gte = f32[] get-tuple-element(p0), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_FALSE(Match(root, match::Op().WithTupleIndex(0)));
EXPECT_TRUE(Match(root, match::Op().WithTupleIndex(1)));
EXPECT_FALSE(Match(root, match::Op().WithTupleIndex(2)));
EXPECT_FALSE(Match(root, match::GetTupleElement(match::Op(), 0)));
EXPECT_TRUE(Match(root, match::GetTupleElement(match::Op(), 1)));
}
TEST_F(PatternMatcherTest, AnyOf) {
constexpr char kModuleStr[] = R"(
HloModule test_module ENTRY test { ROOT constant = f16[] constant(1) })";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(
Match(root, match::AnyOf<HloInstruction>(match::ConstantScalar(0),
match::ConstantScalar(1))));
EXPECT_TRUE(
Match(root, match::AnyOf<HloInstruction>(match::ConstantScalar(1),
match::ConstantScalar(0))));
EXPECT_FALSE(
Match(root, match::AnyOf<HloInstruction>(match::ConstantScalar(0),
match::ConstantScalar(2))));
}
TEST_F(PatternMatcherTest, AnyOfInstructionIsInstructionPattern) {
constexpr char kModuleStr[] = R"(
HloModule test_module ENTRY test { ROOT constant = f16[] constant(1) })";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(
Match(root, match::AnyOf<HloInstruction>(match::ConstantScalar(0),
match::ConstantScalar(1))));
EXPECT_FALSE(
Match(root, match::AnyOf<HloInstruction>(match::ConstantScalar(0),
match::ConstantScalar(1))
.WithName("foo")));
}
TEST_F(PatternMatcherTest, ConstantScalar) {
using match::ConstantEffectiveScalar;
using match::ConstantScalar;
using match::Op;
using match::Tuple;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
a = s32[] constant(1)
b = s32[1,1] constant({{2}})
c = s32[1,2] constant({{2,2}})
d = f32[] constant(1)
e = f32[] constant(1.25)
ROOT tuple = (s32[], s32[1,1], s32[1,2], f32[], f32[]) tuple(a,b,c,d,e)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
const HloInstruction* a = root->operand(0);
const HloInstruction* b = root->operand(1);
const HloInstruction* c = root->operand(2);
const HloInstruction* d = root->operand(3);
const HloInstruction* e = root->operand(4);
EXPECT_TRUE(Match(a, ConstantScalar()));
EXPECT_TRUE(Match(a, ConstantScalar(1)));
EXPECT_TRUE(Match(a, ConstantEffectiveScalar()));
EXPECT_TRUE(Match(a, ConstantEffectiveScalar(1)));
EXPECT_FALSE(Match(a, ConstantScalar(2)));
EXPECT_FALSE(Match(a, ConstantScalar(2.01)));
EXPECT_FALSE(Match(a, ConstantEffectiveScalar(2)));
EXPECT_FALSE(Match(a, ConstantEffectiveScalar(1.01)));
EXPECT_FALSE(Match(b, ConstantScalar()));
EXPECT_FALSE(Match(b, ConstantScalar(2)));
EXPECT_TRUE(Match(b, ConstantEffectiveScalar()));
EXPECT_TRUE(Match(b, ConstantEffectiveScalar(2)));
EXPECT_FALSE(Match(c, ConstantScalar()));
EXPECT_FALSE(Match(c, ConstantScalar(2)));
EXPECT_FALSE(Match(c, ConstantEffectiveScalar()));
EXPECT_FALSE(Match(c, ConstantEffectiveScalar(2)));
EXPECT_TRUE(Match(d, ConstantScalar(1)));
EXPECT_TRUE(Match(d, ConstantEffectiveScalar(1)));
EXPECT_TRUE(Match(d, ConstantScalar(1.0)));
EXPECT_TRUE(Match(d, ConstantEffectiveScalar(1.0)));
EXPECT_TRUE(Match(e, ConstantScalar(1.25f)));
EXPECT_TRUE(Match(e, ConstantScalar(1.25)));
EXPECT_TRUE(Match(e, ConstantEffectiveScalar(1.25)));
EXPECT_FALSE(Match(e, ConstantScalar(1)));
EXPECT_FALSE(Match(e, ConstantEffectiveScalar(1)));
const HloInstruction* instr = nullptr;
EXPECT_TRUE(Match(a, ConstantScalar(&instr)));
EXPECT_EQ(instr, a);
instr = nullptr;
EXPECT_TRUE(Match(a, ConstantScalar(&instr, 1)));
EXPECT_EQ(instr, a);
instr = nullptr;
EXPECT_TRUE(Match(a, ConstantEffectiveScalar(&instr)));
EXPECT_EQ(instr, a);
instr = nullptr;
EXPECT_TRUE(Match(a, ConstantEffectiveScalar(&instr, 1)));
EXPECT_EQ(instr, a);
}
TEST_F(PatternMatcherTest, MultiplyAnyOrder) {
using match::ConstantScalar;
using match::MultiplyAnyOrder;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
lhs = f16[] constant(42)
rhs = f16[] constant(52)
ROOT multiply = f16[] multiply(lhs, rhs)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
const HloInstruction* instr;
EXPECT_TRUE(Match(
root, MultiplyAnyOrder(&instr, ConstantScalar(42), ConstantScalar(52))));
EXPECT_TRUE(Match(
root, MultiplyAnyOrder(&instr, ConstantScalar(52), ConstantScalar(42))));
EXPECT_TRUE(Match(
root, MultiplyAnyOrder(&instr, ConstantScalar(42), ConstantScalar(52))
.IsNonConstant()));
EXPECT_TRUE(
Match(root, MultiplyAnyOrder(ConstantScalar(42), ConstantScalar(52))
.IsNonConstant()));
}
TEST_F(PatternMatcherTest, AnyOfShortCircuit) {
using match::AnyOf;
using match::Multiply;
using match::Op;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
lhs = f16[] constant(42)
rhs = f16[] constant(52)
ROOT multiply = f16[] multiply(lhs, rhs)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
{
const HloInstruction* mul = nullptr;
const HloInstruction* any = nullptr;
ASSERT_TRUE(Match(
root, AnyOf<HloInstruction>(Multiply(&mul, Op(), Op()), Op(&any))));
EXPECT_NE(nullptr, mul);
EXPECT_EQ(nullptr, any);
}
{
const HloInstruction* mul = nullptr;
const HloInstruction* any = nullptr;
ASSERT_TRUE(Match(
root, AnyOf<HloInstruction>(Op(&any), Multiply(&mul, Op(), Op()))));
EXPECT_NE(nullptr, any);
EXPECT_EQ(nullptr, mul);
}
}
TEST_F(PatternMatcherTest, AllOf) {
using match::AllOf;
using match::Broadcast;
using match::Constant;
using match::Op;
constexpr char kModuleStr[] = R"(
HloModule test_module ENTRY test { ROOT constant = f16[] constant(1) })";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
auto f16_scalar = ShapeUtil::MakeShape(F16, {});
auto f16_pattern = Constant().WithShapeEqualTo(&f16_scalar);
auto f16_compatible_pattern = Constant().WithShapeCompatibleTo(&f16_scalar);
auto scalar_pattern = Constant().WithShape(match::Shape().IsScalar());
ASSERT_TRUE(Match(root, scalar_pattern));
ASSERT_TRUE(Match(root, f16_pattern));
ASSERT_TRUE(Match(root, f16_compatible_pattern));
EXPECT_TRUE(Match(root, AllOf<HloInstruction>(scalar_pattern, f16_pattern,
f16_compatible_pattern)));
EXPECT_TRUE(
Match(root, AllOf<HloInstruction>(f16_pattern, f16_compatible_pattern,
scalar_pattern)));
EXPECT_FALSE(
Match(root, AllOf<HloInstruction>(Broadcast(Op()), f16_pattern)));
EXPECT_FALSE(Match(
root, AllOf<HloInstruction>(Broadcast(Op()), f16_compatible_pattern)));
EXPECT_FALSE(
Match(root, AllOf<HloInstruction>(Broadcast(Op()), scalar_pattern)));
}
TEST_F(PatternMatcherTest, AllOfNoCaptureIfNotMatch) {
using match::AllOf;
using match::Broadcast;
using match::Constant;
using match::Op;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
ROOT v = f16[] constant(42)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
const HloInstruction* constant = nullptr;
ASSERT_FALSE(
Match(root, AllOf<HloInstruction>(Constant(&constant), Broadcast(Op()))));
EXPECT_EQ(nullptr, constant);
ASSERT_TRUE(Match(root, Constant(&constant)));
EXPECT_NE(nullptr, constant);
}
TEST_F(PatternMatcherTest, TestNoCapture) {
using match::Constant;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
ROOT v = f16[] constant(42)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
const HloInstruction* constant = nullptr;
ASSERT_TRUE(Match(root, Constant(&constant), {false}));
EXPECT_EQ(nullptr, constant);
}
TEST_F(PatternMatcherTest, TestCaptureMatchedSubPatternForAnyOf) {
using match::Add;
using match::AddAnyOrder;
using match::AnyOf;
using match::Op;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
u = f16[] parameter(0)
v = f16[] parameter(1)
ROOT add = f16[] add(u, v)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
const HloInstruction* addend0 = nullptr;
const HloInstruction* addend1 = nullptr;
const HloInstruction* addend2 = nullptr;
auto add2_pattern = Add(Op(&addend0), Op(&addend1));
auto add3_pattern = AnyOf<HloInstruction>(
AddAnyOrder(add2_pattern, Op(&addend2)), add2_pattern, Op(&addend0));
ASSERT_TRUE(Match(root, add3_pattern));
EXPECT_NE(nullptr, addend0);
EXPECT_NE(nullptr, addend1);
EXPECT_EQ(nullptr, addend2);
}
TEST_F(PatternMatcherTest, TestConcat) {
using match::Concatenate;
using match::ConstantScalar;
using match::Op;
using match::Reshape;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
c1 = u32[] constant(1)
c2 = u32[] constant(2)
c3 = u32[] constant(3)
c4 = u32[] constant(4)
r1 = u32[1] reshape(c1)
r2 = u32[1] reshape(c2)
r3 = u32[1] reshape(c3)
r4 = u32[1] reshape(c4)
ROOT concat = u32[4] concatenate(r1, r2, r3, r4), dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
ASSERT_TRUE(Match(
root,
Concatenate(Reshape(ConstantScalar(1)), Reshape(ConstantScalar(2)),
Reshape(ConstantScalar(3)), Reshape(ConstantScalar(4)))));
ASSERT_FALSE(Match(
root,
Concatenate(Reshape(ConstantScalar(2)), Reshape(ConstantScalar(1)),
Reshape(ConstantScalar(3)), Reshape(ConstantScalar(4)))));
ASSERT_FALSE(Match(
root, Concatenate(Reshape(ConstantScalar(1)), Reshape(ConstantScalar(2)),
Reshape(ConstantScalar(3)))));
ASSERT_FALSE(Match(
root, Concatenate(Reshape(ConstantScalar(2)), Reshape(ConstantScalar(3)),
Reshape(ConstantScalar(4)))));
}
TEST_F(PatternMatcherTest, TestWithElementType) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
ROOT v = f16[] constant(42)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, m::Op().WithElementType(F16)));
EXPECT_FALSE(Match(root, m::Op().WithElementType(F32)));
}
TEST_F(PatternMatcherTest, TestWithOperandIfPresent) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
a = f16[] constant(42)
b = f16[] add(a, a)
ROOT root = tuple(a, b)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
auto* a = root->operand(0);
auto* b = root->operand(1);
EXPECT_TRUE(Match(a, m::Op().WithOperandIfPresent(0, m::Iota())));
EXPECT_TRUE(Match(b, m::Op().WithOperandIfPresent(0, m::Constant())));
EXPECT_TRUE(Match(b, m::Op().WithOperandIfPresent(1, m::Constant())));
EXPECT_FALSE(Match(b, m::Op().WithOperandIfPresent(0, m::Iota())));
EXPECT_TRUE(Match(b, m::Op().WithOperandIfPresent(2, m::Iota())));
EXPECT_TRUE(Match(b, m::Op().WithOperandIfPresent(3, m::Iota())));
}
TEST_F(PatternMatcherTest, TestWithPredicate) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
ROOT a = f16[] constant(42)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(
Match(root, m::Op().WithPredicate([&](const HloInstruction* instr) {
return instr == root;
})));
EXPECT_FALSE(
Match(root, m::Op().WithPredicate([&](const HloInstruction* instr) {
return instr != root;
})));
}
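// Helpers for the describe/explain tests below: Description() renders a
// pattern's DescribeTo() output, Explanation() runs Match with an explanation
// stream attached and returns what was written to it, and
// EXPECT_DESC_AND_EXPLANATION checks both against expected strings.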
template <typename Pattern>
std::string Description(const Pattern& pattern) {
std::stringstream ss;
pattern.DescribeTo(&ss);
return ss.str();
}
template <typename Elem, typename Pattern>
std::string Explanation(Elem* elem, const Pattern& pattern,
bool single_user_only = false) {
std::stringstream ss;
  MatchOption options{true, single_user_only, &ss};
Match(elem, pattern, options);
return ss.str();
}
template <typename Elem, typename Pattern>
std::string Explanation(const std::unique_ptr<Elem>& elem,
const Pattern& pattern) {
return Explanation(elem.get(), pattern);
}
template <typename Elem, typename Pattern>
std::string Explanation(const Elem& elem, const Pattern& pattern) {
return Explanation(&elem, pattern);
}
#define EXPECT_DESC_AND_EXPLANATION(elem, pattern, expected_desc, \
expected_explanation) \
do { \
EXPECT_EQ(Description(pattern), (expected_desc)); \
EXPECT_EQ(Explanation((elem), (pattern)), expected_explanation); \
} while (0)
TEST_F(PatternMatcherTest, LayoutDescribeToAndExplain) {
auto layout = LayoutUtil::MakeLayout({1, 2});
auto layout2 = LayoutUtil::MakeLayout({2, 2});
EXPECT_DESC_AND_EXPLANATION(static_cast<const Layout*>(nullptr), m::Layout(),
"a layout", "Layout is null");
EXPECT_DESC_AND_EXPLANATION(layout2, m::Layout().EqualTo(&layout),
"a layout equal to {1,2}",
"Layout {2,2} is not equal to expected {1,2}");
}
TEST_F(PatternMatcherTest, CustomCallTargetMatcherDescribeAndExplain) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
ROOT out = f32[] custom-call(), custom_call_target="test_target"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, match::Op().WithCustomCallTarget({"test_target"})));
EXPECT_TRUE(Match(
root, match::Op().WithCustomCallTarget({"test_target", "other_target"})));
EXPECT_TRUE(Match(
root, match::Op().WithCustomCallTarget({"other_target", "test_target"})));
EXPECT_FALSE(Match(root, match::Op().WithCustomCallTarget({"other_target"})));
EXPECT_FALSE(Match(root, match::Op().WithCustomCallTarget(
{"other_target", "other_target2"})));
EXPECT_DESC_AND_EXPLANATION(
root, match::Op().WithCustomCallTarget({"other_target"}),
"an HloInstruction custom call with target 'other_target'",
"HloInstruction is not a custom call with a target 'other_target'\nin "
"out = f32[] custom-call(), custom_call_target=\"test_target\"");
EXPECT_DESC_AND_EXPLANATION(
root, match::Op().WithCustomCallTarget({"other_target", "other_target2"}),
"an HloInstruction custom call with target in {other_target, "
"other_target2}",
"HloInstruction is not a custom call with a target in {other_target, "
"other_target2}\nin "
"out = f32[] custom-call(), custom_call_target=\"test_target\"");
}
TEST_F(PatternMatcherTest, ShapeDescribeToAndExplain) {
auto shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 2}, {0, 1});
auto layout = shape.layout();
EXPECT_DESC_AND_EXPLANATION(static_cast<const Shape*>(nullptr), m::Shape(),
"a shape", "Shape is null");
EXPECT_DESC_AND_EXPLANATION(
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 2}, {1, 0}),
m::Shape().EqualTo(&shape), "a shape equal to f32[1,2]{0,1}",
"Shape not equal to f32[1,2]{0,1}\n"
"in f32[1,2]{1,0}");
EXPECT_DESC_AND_EXPLANATION(ShapeUtil::MakeShape(F32, {2, 2}),
m::Shape().CompatibleTo(&shape),
"a shape compatible with f32[1,2]",
"Shape not compatible with f32[1,2]\n"
"in f32[2,2]{1,0}");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().WithElementType(F16),
"a shape with element type F16",
"Shape does not have element type F16\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().IsScalar(),
"a shape that represents a scalar",
"Shape is not a scalar\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(ShapeUtil::MakeNil(), m::Shape().IsArray(),
"a shape that represents an array",
"Shape is not an array\n"
"in ()");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().IsTuple(),
"a shape that represents a tuple",
"Shape is not a tuple\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().IsEffectiveScalar(),
"a shape that is an effective scalar",
"Shape is not an effective scalar\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().WithRank(42),
"a shape that has 42 dimensions",
"Shape does not have rank 42\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().WithRank(0),
"a shape that is a scalar",
"Shape is not a scalar\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().WithRank(1).IsArray(),
"a shape:\n"
" * that has 1 dimension AND\n"
" * that represents an array",
"Shape does not have rank 1\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(ShapeUtil::MakeNil(),
m::Shape().IsArray().WithRank(1),
"a shape:\n"
" * that represents an array AND\n"
" * that has 1 dimension",
"Shape is not an array\n"
"in ()");
EXPECT_DESC_AND_EXPLANATION(
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 2}, {1, 0}),
m::Shape().WithLayoutEqualTo(&layout),
"a shape with\n a layout equal to {0,1}",
"Layout {1,0} is not equal to expected {0,1}\n"
"in f32[1,2]{1,0}");
EXPECT_DESC_AND_EXPLANATION(shape,
m::Shape().WithSubshapeEqualTo({10}, &shape),
"a shape with subshape at index {10} which is\n"
" a shape equal to f32[1,2]{0,1}",
"No subshape at {10}\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {2, 2})}),
m::Shape().WithSubshapeEqualTo({0}, &shape),
"a shape with subshape at index {0} which is\n"
" a shape equal to f32[1,2]{0,1}",
"Shape not equal to f32[1,2]{0,1}\n"
"in f32[2,2]{1,0}\n"
"in subshape at {0}\n"
"in (f32[2,2])");
EXPECT_DESC_AND_EXPLANATION(shape,
m::Shape().WithSubshapeCompatibleTo({10}, &shape),
"a shape with subshape at index {10} which is\n"
" a shape compatible with f32[1,2]",
"No subshape at {10}\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {2, 2})}),
m::Shape().WithSubshapeCompatibleTo({0}, &shape),
"a shape with subshape at index {0} which is\n"
" a shape compatible with f32[1,2]",
"Shape not compatible with f32[1,2]\n"
"in f32[2,2]{1,0}\n"
"in subshape at {0}\n"
"in (f32[2,2])");
EXPECT_DESC_AND_EXPLANATION(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTupleShape({shape})}),
m::Shape().WithSubshape({0, 0}, m::Shape().IsScalar()),
"a shape with subshape at index {0,0} which is\n"
" a shape that represents a scalar",
"Shape is not a scalar\n"
"in f32[1,2]{0,1}\n"
"in subshape at {0,0}\n"
"in ((f32[1,2]))");
}
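// Renames an instruction so that the expected description/explanation strings
// in the tests below are deterministic.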
std::unique_ptr<HloInstruction> SetName(absl::string_view name,
std::unique_ptr<HloInstruction> instr) {
instr->SetAndSanitizeName(name);
return instr;
}
TEST_F(PatternMatcherTest, HloInstructionDescribeToAndExplain) {
std::unique_ptr<HloInstruction> iota =
SetName("i", HloInstruction::CreateIota(ShapeUtil::MakeShape(S32, {42}),
0));
std::unique_ptr<HloInstruction> constant =
SetName("c", HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));
EXPECT_DESC_AND_EXPLANATION(static_cast<const HloInstruction*>(nullptr),
m::Op(), "an HloInstruction",
"HloInstruction* is null");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().WithName("foo"),
"an HloInstruction named \"foo\"",
"HloInstruction not named \"foo\"\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().WithOpcode(HloOpcode::kAdd),
"an HloInstruction with opcode add",
"HloInstruction doesn't have opcode add\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(
constant, m::Op().IsNonConstant(),
"an HloInstruction with any opcode other than constant",
"HloInstruction has opcode constant, expected anything else\n"
"in c = s32[] constant(0)");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().WithNumOperands(42),
"an HloInstruction with 42 operands",
"HloInstruction doesn't have 42 operands\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().WithShape(m::Shape().IsTuple()),
"an HloInstruction outputting\n"
" a shape that represents a tuple",
"Shape is not a tuple\n"
"in s32[42]{0}\n"
"in output shape\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().WithShape(F32, {42}),
"an HloInstruction outputting\n"
" a shape:\n"
" * with element type F32 AND\n"
" * with dimensions [42]",
"Shape does not have element type F32\n"
"in s32[42]{0}\n"
"in output shape\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().WithShape(S32, {128}),
"an HloInstruction outputting\n"
" a shape:\n"
" * with element type S32 AND\n"
" * with dimensions [128]",
"Shape does not have dimensions [128]\n"
"in s32[42]{0}\n"
"in output shape\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(
iota, m::Op().WithOperand(2, m::Op().WithOpcode(HloOpcode::kAdd)),
"an HloInstruction with operand 2 which is:\n"
" an HloInstruction with opcode add",
"desired operand index 2 is out of bounds\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(
SetName("a", HloInstruction::CreateBinary(ShapeUtil::MakeShape(S32, {}),
HloOpcode::kAdd, constant.get(),
constant.get())),
m::Op().WithOperand(1, m::Op().IsNonConstant()),
"an HloInstruction with operand 1 which is:\n"
" an HloInstruction with any opcode other than constant",
"HloInstruction has opcode constant, expected anything else\n"
"in c = s32[] constant(0)\n"
"in operand 1\n"
"in a = s32[] add(s32[] c, s32[] c)");
EXPECT_DESC_AND_EXPLANATION(
iota, m::Op().WithFusionKind(HloInstruction::FusionKind::kLoop),
"an HloInstruction with fusion kind kLoop",
"HloInstruction does not have fusion kind kLoop; it's not a fusion\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(
iota, m::Op().WithTupleIndex(42),
"an HloInstruction which is a GTE with index 42",
"HloInstruction is not a GTE with index 42; it's not a GTE at all\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().IsConstantScalar(),
"an HloInstruction which is a constant scalar",
"HloInstruction is not a constant\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(
SetName("c", HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int>({1, 2}))),
m::Op().IsConstantEffectiveScalar(),
"an HloInstruction which is a constant effective scalar",
"HloInstruction is not an effective scalar\n"
"in c = s32[2]{0} constant({1, 2})");
EXPECT_DESC_AND_EXPLANATION(
SetName("c", HloInstruction::CreateConstant(LiteralUtil::CreateR0(10))),
m::Op().IsConstantScalar(42),
"an HloInstruction which is a constant scalar with value 42",
"HloInstruction's constant value 10 did not match expected value 42\n"
"in c = s32[] constant(10)");
EXPECT_DESC_AND_EXPLANATION(
SetName("c", HloInstruction::CreateConstant(LiteralUtil::CreateR0(2.25))),
m::Op().IsConstantEffectiveScalar(1.25),
"an HloInstruction which is a constant effective scalar with value 1.25",
"HloInstruction's constant value 2.25 did not match expected value 1.25\n"
"in c = f64[] constant(2.25)");
EXPECT_DESC_AND_EXPLANATION(
constant, m::Op().Is(iota.get()),
absl::StrCat("an HloInstruction which is 0x", absl::Hex(iota.get()),
" (i = s32[42]{0} iota(), iota_dimension=0)"),
absl::StrCat("HloInstruction 0x", absl::Hex(constant.get()), " is not 0x",
absl::Hex(iota.get()),
" (i = s32[42]{0} iota(), iota_dimension=0)\n"
"in c = s32[] constant(0)"));
EXPECT_DESC_AND_EXPLANATION(
SetName("a",
HloInstruction::CreateBinary(constant->shape(), HloOpcode::kAdd,
constant.get(), constant.get())),
m::Op().WithOperandIfPresent(0, m::Iota()),
"an HloInstruction either with fewer than 1 operand, or with an operand "
"0 which is:\n"
" an HloInstruction with opcode iota",
"HloInstruction doesn't have opcode iota\n"
"in c = s32[] constant(0)\n"
"in operand 0\n"
"in a = s32[] add(s32[] c, s32[] c)");
EXPECT_DESC_AND_EXPLANATION(
constant, m::Op().WithPredicate(HloPredicateFalse),
"an HloInstruction which matches a user-specified predicate",
"HloInstruction does not match user-specified predicate\n"
"in c = s32[] constant(0)");
}
TEST_F(PatternMatcherTest, HloInstructionMatcherAnyOrderDescribeTo) {
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
EXPECT_DESC_AND_EXPLANATION(
SetName("a", HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd,
SetName("b", HloInstruction::CreateConstant(
LiteralUtil::CreateR0(0)))
.get(),
SetName("c", HloInstruction::CreateConstant(
LiteralUtil::CreateR0(0)))
.get())),
m::AddAnyOrder(m::Op().WithName("b"), m::Op().WithName("bar")),
"an HloInstruction:\n"
" * with opcode add AND\n"
" * with two operands in either order:\n"
" - an HloInstruction named \"b\"\n"
" - an HloInstruction named \"bar\"",
"HloInstruction's operands (ignoring order) did not match second "
"matcher. Specifically,\n"
" - an HloInstruction named \"bar\"\n"
"does not match LHS:\n"
" - HloInstruction not named \"bar\"\n"
" in b = s32[] constant(0)\n"
"does not match RHS:\n"
" - HloInstruction not named \"bar\"\n"
" in c = s32[] constant(0)\n"
"in a = s32[] add(s32[] b, s32[] c)");
EXPECT_DESC_AND_EXPLANATION(
SetName("a",
HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd,
HloInstruction::CreateParameter(0, scalar_s32, "p").get(),
SetName("c", HloInstruction::CreateConstant(
LiteralUtil::CreateR0(0)))
.get())),
m::AddAnyOrder(m::Op().IsConstantScalar(), m::Op().IsConstant()),
"an HloInstruction:\n"
" * with opcode add AND\n"
" * with two operands in either order:\n"
" - an HloInstruction which is a constant scalar\n"
" - an HloInstruction with opcode constant",
"HloInstruction's LHS operand did not match either of the two matchers. "
"Specifically,\n"
" - an HloInstruction which is a constant scalar\n"
"does not match LHS:\n"
" - HloInstruction is not a constant\n"
" in p = s32[] parameter(0)\n"
"and\n"
" - an HloInstruction with opcode constant\n"
"does not match LHS:\n"
" - HloInstruction doesn't have opcode constant\n"
" in p = s32[] parameter(0)\n"
"in a = s32[] add(s32[] p, s32[] c)");
}
TEST_F(PatternMatcherTest, AnyOfMatcherDescribeToAndExplain) {
EXPECT_DESC_AND_EXPLANATION(
ShapeUtil::MakeScalarShape(S32),
m::AnyOf<Shape>(m::Shape().WithRank(1), m::Shape().WithElementType(F32)),
"any of:\n"
" - a shape that has 1 dimension OR\n"
" - a shape with element type F32",
"None of the following matchers succeeded:\n"
"Matcher #1\n"
" - a shape that has 1 dimension\n"
"failed with\n"
" - Shape does not have rank 1\n"
" in s32[]\n"
"Matcher #2\n"
" - a shape with element type F32\n"
"failed with\n"
" - Shape does not have element type F32\n"
" in s32[]");
}
TEST_F(PatternMatcherTest, Parameter) {
auto param =
HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {}), "p1");
auto non_param =
SetName("c", HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));
EXPECT_FALSE(Match(param.get(), m::Parameter(0)));
EXPECT_TRUE(Match(param.get(), m::Parameter()));
EXPECT_TRUE(Match(param.get(), m::Parameter(1)));
EXPECT_FALSE(Match(non_param.get(), m::Parameter()));
EXPECT_FALSE(Match(non_param.get(), m::Parameter(1)));
EXPECT_DESC_AND_EXPLANATION(non_param, m::Parameter(1),
"an HloInstruction:\n"
" * with opcode parameter AND\n"
" * which is parameter 1",
"HloInstruction doesn't have opcode parameter\n"
"in c = s32[] constant(0)");
EXPECT_EQ(Explanation(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "p0"),
m::Parameter(1)),
"HloInstruction is not parameter 1\n"
"in p0 = f32[] parameter(0)");
}
TEST_F(PatternMatcherTest, OneUseAndOneUser) {
auto param =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0");
EXPECT_FALSE(Match(param.get(), m::Op().WithOneUse()));
EXPECT_DESC_AND_EXPLANATION(
param, m::Op().WithOneUse(),
"an HloInstruction which has exactly one use",
"HloInstruction has 0 users, but expected exactly one.\n"
"in p0 = f32[] parameter(0)");
EXPECT_FALSE(Match(param.get(), m::Op().WithOneUser()));
EXPECT_DESC_AND_EXPLANATION(
param, m::Op().WithOneUser(),
"an HloInstruction which has exactly one user (but possibly is used "
"multiple times by that instruction)",
"HloInstruction has 0 users, but expected exactly one.\n"
"in p0 = f32[] parameter(0)");
{
auto reshape =
SetName("r", HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1}), param.get()));
EXPECT_TRUE(Match(param.get(), m::Op().WithOneUse()));
EXPECT_TRUE(Match(param.get(), m::Op().WithOneUser()));
auto reshape1 =
SetName("r1", HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1}), param.get()));
EXPECT_FALSE(Match(param.get(), m::Op().WithOneUse()));
EXPECT_FALSE(Match(param.get(), m::Op().WithOneUser()));
const char* kMultipleUserExplanation =
"HloInstruction has 2 users, but expected exactly one.\n"
"All users:\n"
" - r = f32[1]{0} reshape(f32[] p0)\n"
" - r1 = f32[1]{0} reshape(f32[] p0)\n"
"in p0 = f32[] parameter(0)";
EXPECT_EQ(Explanation(param.get(), m::Op().WithOneUse()),
kMultipleUserExplanation);
EXPECT_EQ(Explanation(param.get(), m::Op().WithOneUser()),
kMultipleUserExplanation);
}
auto add = SetName("add", HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd,
param.get(), param.get()));
EXPECT_TRUE(Match(param.get(), m::Op().WithOneUser()));
EXPECT_FALSE(Match(param.get(), m::Op().WithOneUse()));
EXPECT_EQ(Explanation(param.get(), m::Op().WithOneUse()),
"HloInstruction is used 2 times by its user, but is expected to be "
"used just once: add = f32[] add(f32[] p0, f32[] p0)\n"
"in p0 = f32[] parameter(0)");
}
TEST_F(PatternMatcherTest, MatchSingleUserOnlyUnaryOpOneUser) {
auto param =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p");
auto reshape =
SetName("reshape", HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1}), param.get()));
EXPECT_TRUE(MatchSingleUserOnly(reshape.get(), m::Reshape(m::Op())));
EXPECT_TRUE(Match(reshape.get(), m::Reshape(m::Op().WithOneUser())));
}
TEST_F(PatternMatcherTest, MatchSingleUserOnlyUnaryOpTwoUsers) {
auto param =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p");
auto reshape =
SetName("reshape", HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1}), param.get()));
auto bitcast =
SetName("bitcast", HloInstruction::CreateBitcast(
ShapeUtil::MakeShape(F32, {1}), param.get()));
EXPECT_TRUE(MatchSingleUserOnly(param.get(), m::Op()));
EXPECT_TRUE(Match(param.get(), m::Op()));
EXPECT_TRUE(MatchSingleUserOnly(bitcast.get(), m::Bitcast()));
EXPECT_TRUE(Match(bitcast.get(), m::Bitcast()));
EXPECT_FALSE(MatchSingleUserOnly(bitcast.get(), m::Bitcast(m::Op())));
EXPECT_FALSE(Match(bitcast.get(), m::Bitcast(m::Op().WithOneUser())));
EXPECT_EQ(Explanation(bitcast.get(), m::Bitcast(m::Op()),
                        /*single_user_only=*/true),
"Operand 0 of HloInstruction has 2 users. Expected 1.\nin bitcast "
"= f32[1]{0} bitcast(f32[] p)");
}
TEST_F(PatternMatcherTest, MatchSingleUserOnlyBinaryOpOneUser) {
auto param0 =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0");
auto add = SetName("add", HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd,
param0.get(), param0.get()));
EXPECT_TRUE(MatchSingleUserOnly(add.get(), m::Add(m::Op(), m::Op())));
EXPECT_TRUE(
Match(add.get(), m::Add(m::Op().WithOneUser(), m::Op().WithOneUser())));
}
TEST_F(PatternMatcherTest, MatchSingleUserOnlyBinaryOpTwoUsers) {
auto param0 =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0");
auto param1 =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p1");
auto add = SetName("add", HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd,
param0.get(), param0.get()));
auto mul =
SetName("mul", HloInstruction::CreateBinary(ShapeUtil::MakeShape(F32, {}),
HloOpcode::kMultiply,
param1.get(), param0.get()));
EXPECT_TRUE(MatchSingleUserOnly(mul.get(), m::Multiply()));
EXPECT_TRUE(Match(mul.get(), m::Multiply()));
EXPECT_FALSE(MatchSingleUserOnly(mul.get(), m::Multiply(m::Op(), m::Op())));
EXPECT_FALSE(Match(
mul.get(), m::Multiply(m::Op().WithOneUser(), m::Op().WithOneUser())));
EXPECT_EQ(Explanation(mul.get(), m::Multiply(m::Op(), m::Op()),
                        /*single_user_only=*/true),
"Operand 1 of HloInstruction has 2 users. Expected 1.\nin mul = "
"f32[] multiply(f32[] p1, f32[] p0)");
EXPECT_FALSE(MatchSingleUserOnly(add.get(), m::Add(m::Op(), m::Op())));
EXPECT_FALSE(
Match(add.get(), m::Add(m::Op().WithOneUser(), m::Op().WithOneUser())));
EXPECT_EQ(Explanation(add.get(), m::Add(m::Op(), m::Op()),
                        /*single_user_only=*/true),
"Operand 0 of HloInstruction has 2 users. Expected 1.\nin add = "
"f32[] add(f32[] p0, f32[] p0)");
}
TEST_F(PatternMatcherTest, MatchSingleUserOnlyBinaryOpTwoUsersLowerLevel) {
auto param0 =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0");
auto param1 =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p1");
auto add = SetName("add", HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd,
param0.get(), param0.get()));
auto mul =
SetName("mul", HloInstruction::CreateBinary(ShapeUtil::MakeShape(F32, {}),
HloOpcode::kMultiply,
param1.get(), param0.get()));
auto div = SetName("div", HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}),
HloOpcode::kDivide, add.get(), mul.get()));
EXPECT_TRUE(
MatchSingleUserOnly(div.get(), m::Divide(m::Add(), m::Multiply())));
EXPECT_TRUE(Match(div.get(), m::Divide(m::Add().WithOneUser(),
m::Multiply().WithOneUser())));
EXPECT_FALSE(MatchSingleUserOnly(
div.get(), m::Divide(m::Add(m::Op(), m::Op()), m::Multiply())));
EXPECT_FALSE(Match(
div.get(),
m::Divide(
m::Add(m::Op().WithOneUser(), m::Op().WithOneUser()).WithOneUser(),
m::Multiply().WithOneUser())));
EXPECT_EQ(Explanation(add.get(), m::Add(m::Op(), m::Op()),
                        /*single_user_only=*/true),
"Operand 0 of HloInstruction has 2 users. Expected 1.\nin add = "
"f32[] add(f32[] p0, f32[] p0)");
}
TEST_F(PatternMatcherTest, Comparison) {
auto shape = ShapeUtil::MakeShape(F32, {1});
auto p0 = HloInstruction::CreateParameter(0, shape, "param.0");
auto p1 = HloInstruction::CreateParameter(1, shape, "param.1");
auto eq = HloInstruction::CreateCompare(shape, p0.get(), p1.get(),
ComparisonDirection::kEq);
auto ne = HloInstruction::CreateCompare(shape, p0.get(), p1.get(),
ComparisonDirection::kNe);
auto add =
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0.get(), p1.get());
auto le = HloInstruction::CreateCompare(shape, p0.get(), add.get(),
ComparisonDirection::kLe);
EXPECT_TRUE(Match(eq.get(), m::Compare()));
EXPECT_TRUE(Match(eq.get(), m::Eq()));
EXPECT_TRUE(Match(eq.get(), m::Eq(m::Parameter(0), m::Parameter(1))));
EXPECT_TRUE(Match(eq.get(), m::EqAnyOrder(m::Parameter(1), m::Parameter(0))));
EXPECT_TRUE(Match(ne.get(), m::Compare()));
EXPECT_TRUE(Match(ne.get(), m::Ne()));
EXPECT_TRUE(Match(
le.get(),
m::Compare(m::Parameter(0), m::Add(m::Parameter(0), m::Parameter(1)))));
EXPECT_TRUE(Match(le.get(), m::Le(m::Parameter(0),
m::Add(m::Parameter(0), m::Parameter(1)))));
EXPECT_FALSE(Match(eq.get(), m::Add()));
EXPECT_FALSE(Match(eq.get(), m::Ne()));
EXPECT_FALSE(
Match(le.get(),
m::Eq(m::Parameter(0), m::Add(m::Parameter(0), m::Parameter(1)))));
EXPECT_FALSE(Match(eq.get(), m::Eq(m::Parameter(1), m::Parameter(0))));
EXPECT_DESC_AND_EXPLANATION(
eq, m::Ne().WithOneUser(),
"an HloInstruction:\n"
" * with opcode compare AND\n"
" * which has comparison direction NE AND\n"
" * which has exactly one user (but possibly is used "
"multiple times by that instruction)",
"HloInstruction is not comparison NE\n"
"in compare = f32[1]{0} compare(f32[1]{0} param.0, f32[1]{0} param.1), "
"direction=EQ");
}
TEST_F(PatternMatcherTest, ConvDnums) {
TF_ASSERT_OK_AND_ASSIGN(ConvolutionDimensionNumbers dnums,
ParseConvolutionDimensionNumbers("bf01_oi01->bf01"));
auto param =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0");
auto op = HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
{},
"foo");
op->set_convolution_dimension_numbers(dnums);
EXPECT_TRUE(Match(op.get(), m::CustomCall().WithConvDnums(dnums)));
EXPECT_TRUE(
Match(op.get(), m::CustomCall().WithConvDnums("bf01_oi01->bf01")));
TF_ASSERT_OK_AND_ASSIGN(ConvolutionDimensionNumbers different_dnums,
ParseConvolutionDimensionNumbers("b01f_oi01->bf01"));
EXPECT_FALSE(Match(op.get(), m::CustomCall().WithConvDnums(different_dnums)));
EXPECT_FALSE(
Match(op.get(), m::CustomCall().WithConvDnums("b01f_oi01->bf01")));
EXPECT_FALSE(
Match(param.get(), m::CustomCall().WithConvDnums("b01f_oi01->bf01")));
EXPECT_DESC_AND_EXPLANATION(
op.get(), m::CustomCall().WithConvDnums("b01f_oi01->bf01"),
"an HloInstruction:\n"
" * with opcode custom-call AND\n"
" * which has convolution dimension numbers b01f_oi01->bf01",
"convolution_dimension_numbers bf01_oi01->bf01 don't match expected "
"b01f_oi01->bf01\n"
"in custom-call = f32[] custom-call(), dim_labels=bf01_oi01->bf01, "
"custom_call_target=\"foo\"");
}
TEST_F(PatternMatcherTest, CustomCallMatchers) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT out = f32[] custom-call(p0, p1), custom_call_target="test_target"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, m::CustomCall()));
EXPECT_TRUE(Match(root, m::CustomCall({"test_target"})));
EXPECT_TRUE(Match(
root, m::CustomCall({"test_target"}, m::Parameter(0), m::Parameter(1))));
EXPECT_TRUE(Match(root, m::CustomCall({"test_target", "other_target"})));
EXPECT_TRUE(Match(root, m::CustomCall({"other_target", "test_target"})));
EXPECT_TRUE(Match(root, m::CustomCall({"test_target", "other_target"},
m::Parameter(0), m::Parameter(1))));
EXPECT_TRUE(Match(root, m::CustomCall({"other_target", "test_target"},
m::Parameter(0), m::Parameter(1))));
HloInstruction* instr;
EXPECT_TRUE(Match(root, m::CustomCall(&instr)));
EXPECT_TRUE(Match(root, m::CustomCall(&instr, {"test_target"})));
EXPECT_TRUE(Match(root, m::CustomCall(&instr, {"test_target"},
m::Parameter(0), m::Parameter(1))));
const HloInstruction* const_instr;
EXPECT_TRUE(Match(root, m::CustomCall(&const_instr)));
EXPECT_TRUE(Match(root, m::CustomCall(&const_instr, {"test_target"})));
EXPECT_TRUE(Match(root, m::CustomCall(&const_instr, {"test_target"},
m::Parameter(0), m::Parameter(1))));
EXPECT_FALSE(Match(root, m::CustomCall({"other_target"})));
EXPECT_FALSE(Match(root, m::CustomCall({"other_target", "other_target2"})));
EXPECT_FALSE(Match(
root, m::CustomCall({"test_target"}, m::Parameter(1), m::Parameter(0))));
}
TEST_F(PatternMatcherTest, SharedSubpatternPreservesTheSemantics) {
auto scalar0 = m::SharedSubpattern(m::ConstantScalar(0));
auto pattern0 = m::AnyOf<HloInstruction>(m::Convert(scalar0), scalar0);
auto scalar1 = m::SharedSubpattern(m::ConstantScalar(1));
auto pattern1 = m::AnyOf<HloInstruction>(m::Convert(scalar1), scalar1);
{
constexpr char kModuleStr[] = R"(
HloModule test_module ENTRY test {
ROOT constant = f16[] constant(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, pattern0));
EXPECT_FALSE(Match(root, pattern1));
}
{
constexpr char kModuleStr[] = R"(
HloModule test_module ENTRY test {
constant = f16[] constant(0)
ROOT convert = f32[] convert(constant)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, pattern0));
EXPECT_FALSE(Match(root, pattern1));
}
}
TEST_F(PatternMatcherTest, SharedSubpatternCanBeNested) {
auto scalar0 = m::SharedSubpattern(match::ConstantScalar(0));
auto subpattern0 = m::SharedSubpattern(
m::AnyOf<HloInstruction>(m::Convert(scalar0), scalar0));
auto pattern0 =
m::AnyOf<HloInstruction>(m::Convert(subpattern0), subpattern0);
auto scalar1 = m::SharedSubpattern(match::ConstantScalar(1));
auto subpattern1 = m::SharedSubpattern(
m::AnyOf<HloInstruction>(m::Convert(scalar1), scalar1));
auto pattern1 =
m::AnyOf<HloInstruction>(m::Convert(subpattern1), subpattern1);
{
constexpr char kModuleStr[] = R"(
HloModule test_module ENTRY test {
constant = f16[] constant(0)
convert = f32[] convert(constant)
ROOT convert1 = f32[] convert(convert)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, pattern0));
EXPECT_FALSE(Match(root, pattern1));
}
}
TEST_F(PatternMatcherTest, TestWithContractingDims) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
%param1 = f32[2048,1024] parameter(0)
%param2 = f32[1024,33708] parameter(1)
ROOT %dot1 = f32[2048,33708]{1,0} dot(f32[2048,1024]{1,0} %param1,
f32[1024,33708]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, m::Dot().WithContractingDims({1}, {0})));
EXPECT_FALSE(Match(root, m::Dot().WithContractingDims({0}, {1})));
EXPECT_FALSE(Match(root, m::Dot().WithContractingDims({1}, {0, 1})));
EXPECT_DESC_AND_EXPLANATION(
root, m::Dot().WithContractingDims({1}, {0, 1}),
"an HloInstruction:\n"
" * with opcode dot AND\n"
" * with lhs_contracting_dims {1} and rhs_contracting_dims {0,1}",
"rhs_contracting_dimensions {0} don't match expected {0,1}\n"
"in dot1 = f32[2048,33708]{1,0} dot(f32[2048,1024]{1,0} param1, "
"f32[1024,33708]{1,0} param2), lhs_contracting_dims={1}, "
"rhs_contracting_dims={0}");
}
TEST_F(PatternMatcherTest, TestWithReplicaGroups) {
constexpr char kModuleStr[] = R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY test {
input = f32[128,32]{0,1} parameter(0)
ROOT all-reduce = f32[128,32]{0,1} all-reduce(input),
replica_groups={{0,1},{2,3}}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, m::AllReduce().WithReplicaGroups({{0, 1}, {2, 3}})));
EXPECT_FALSE(Match(root, m::AllReduce().WithReplicaGroups({{}, {}})));
EXPECT_FALSE(Match(root, m::AllReduce().WithReplicaGroups({{1, 0}, {3, 2}})));
EXPECT_DESC_AND_EXPLANATION(
root, m::AllReduce().WithReplicaGroups({{1, 0}, {3, 2}}),
"an HloInstruction:\n"
" * with opcode all-reduce AND\n"
" * with replica_group {{1,0},{3,2}}",
"replica_group {{0,1},{2,3}} don't match expected with replica_group "
"{{1,0},{3,2}}\n"
"in all-reduce = f32[128,32]{0,1} all-reduce(f32[128,32]{0,1} input), "
"replica_groups={{0,1},{2,3}}, to_apply=add");
}
TEST_F(PatternMatcherTest, TestWithSharding) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
p0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3},
metadata={op_name="test"}
ROOT copy = f32[5,7,11,13]{3,2,1,0} copy(p0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* instruction = FindInstruction(hlo_module.get(), "p0");
EXPECT_TRUE(
Match(instruction, m::Op().WithSharding("{devices=[1,2,2,1]0,1,2,3}")));
EXPECT_FALSE(
Match(instruction, m::Op().WithSharding("{devices=[2,2,1,1]0,1,2,3}")));
EXPECT_DESC_AND_EXPLANATION(
instruction, m::Op().WithSharding("{devices=[2,2,1,1]0,1,2,3}"),
"an HloInstruction with sharding {devices=[2,2,1,1]0,1,2,3}",
"sharding {devices=[1,2,2,1]0,1,2,3} don't match expected "
"{devices=[2,2,1,1]0,1,2,3}\n"
"in p0 = f32[5,7,11,13]{3,2,1,0} parameter(0), "
"sharding={devices=[1,2,2,1]0,1,2,3}");
}
TEST_F(PatternMatcherTest, TestWithControlDeps) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
p0 = f32[4] parameter(0)
p1 = f32[4] parameter(1)
add = f32[4] add(p0, p1)
mul = f32[4] multiply(p0, p1), control-predecessors={add}
div = f32[4] divide(p0, p1), control-predecessors={mul}
ROOT t = (f32[4], f32[4], f32[4]) tuple(add, mul, div)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* add = FindInstruction(hlo_module.get(), "add");
auto* mul = FindInstruction(hlo_module.get(), "mul");
auto* div = FindInstruction(hlo_module.get(), "div");
EXPECT_TRUE(Match(add, m::Op().WithControlDeps({}, {mul})));
EXPECT_TRUE(Match(mul, m::Op().WithControlDeps({add}, {div})));
EXPECT_TRUE(Match(div, m::Op().WithControlDeps({mul}, {})));
EXPECT_FALSE(Match(div, m::Op().WithControlDeps({mul}, {div})));
EXPECT_DESC_AND_EXPLANATION(
div, m::Op().WithControlDeps({mul}, {div}),
"an HloInstruction with control predecessors {mul} and control "
"successors {div}",
"HloInstruction expected to have control successors {div} but has {}\n"
"in div = f32[4]{0} divide(f32[4]{0} p0, f32[4]{0} p1), "
"control-predecessors={mul}");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/pattern_matcher.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/pattern_matcher_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d6649578-7be7-42e1-8e40-fd3691a3885d | cpp | tensorflow/tensorflow | mapped_ptr_container_sorter | third_party/xla/xla/service/mapped_ptr_container_sorter.h | third_party/xla/xla/service/mapped_ptr_container_sorter_test.cc | #ifndef XLA_SERVICE_MAPPED_PTR_CONTAINER_SORTER_H_
#define XLA_SERVICE_MAPPED_PTR_CONTAINER_SORTER_H_
#include <array>
#include <cstddef>
#include <functional>
#include <limits>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
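// MappedPtrContainerSorter sorts a container of (raw or smart) pointers so
// that its elements follow the order of a corresponding "ordered" container.
// The correspondence is supplied by a map_ptr callback that maps an ordered
// element to its counterpart in the unordered container (or nullptr if there
// is none). Unordered elements with no mapping are placed according to an
// unmapped_index callback, which may return IndexBeforeMappedElements(),
// IndexAfterMappedElements(), InvalidIndex(), or a position amongst the
// mapped elements.
//
// Minimal usage sketch (MyType and MyMapPtr are hypothetical; MyMapPtr maps
// ordered pointers to their unordered counterparts):
//
//   using Sorter = MappedPtrContainerSorter<MyType>;
//   TF_RETURN_IF_ERROR(Sorter::Sort(MyMapPtr, Sorter::InvalidIndexFn(),
//                                   ordered_container, unordered_container));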
template <typename PointedToTy>
class MappedPtrContainerSorter {
public:
using MapPtrFn = absl::FunctionRef<const PointedToTy*(const PointedToTy*)>;
using UnmappedPtrIndexFn = absl::FunctionRef<size_t(const PointedToTy*)>;
static UnmappedPtrIndexFn IndexBeforeMappedElementsFn();
static UnmappedPtrIndexFn IndexAfterMappedElementsFn();
static UnmappedPtrIndexFn InvalidIndexFn();
template <typename OrderedTy, typename UnorderedTy>
static absl::Status Sort(MapPtrFn map_ptr, UnmappedPtrIndexFn unmapped_index,
const OrderedTy& ordered_container,
UnorderedTy& unordered_container);
private:
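  // Helper that accumulates, for each mapped element of the unordered
  // container, its partial-order value (derived from the ordered container),
  // and for each unmapped element, its requested target position. Flatten()
  // turns this bookkeeping into a permutation of unordered-container indices.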
class SortedIndices {
public:
SortedIndices(size_t max_partial_order_exclusive,
size_t unordered_container_size)
: max_partial_order_exclusive_(max_partial_order_exclusive),
unordered_container_size_(unordered_container_size),
mapped_element_indices_by_partial_order_(
max_partial_order_exclusive) {}
absl::Status AddMappedElement(size_t unordered_container_index,
size_t partial_order);
void AddUnmappedElement(size_t unordered_container_index,
size_t target_index_amongst_mapped_elements);
std::string ToString() const;
absl::StatusOr<std::vector<size_t>> Flatten() const;
private:
SortedIndices() = delete;
size_t max_partial_order_exclusive_;
size_t unordered_container_size_;
std::vector<std::vector<size_t>> mapped_element_indices_by_partial_order_;
absl::flat_hash_map<size_t, std::vector<size_t>>
target_index_to_unmapped_element_index_;
};
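  // Sentinel values returned by the unmapped-index callbacks. They sit near
  // the top of the size_t range so they cannot collide with real container
  // indices; containers that large are rejected in ComputeNewIndices.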
static size_t IndexBeforeMappedElements() {
return std::numeric_limits<size_t>::max() - 2;
}
static size_t IndexAfterMappedElements() {
return std::numeric_limits<size_t>::max() - 1;
}
static size_t InvalidIndex() { return std::numeric_limits<size_t>::max(); }
template <typename OrderedTy, typename UnorderedTy>
static absl::StatusOr<std::vector<size_t>> ComputeNewIndices(
MapPtrFn map_ptr, UnmappedPtrIndexFn unmapped_index,
const OrderedTy& ordered_container,
const UnorderedTy& unordered_container);
template <typename UnorderedTy>
static void Reorder(std::vector<size_t> new_indices,
UnorderedTy& unordered_container);
};
namespace mapped_ptr_container_sorter_internal {
template <typename I, typename O>
struct PtrGetter {
static O Get(I i);
};
template <typename T>
struct PtrGetter<T* const&, const T*> {
static const T* Get(T* const& p) { return p; }
};
template <typename T>
struct PtrGetter<T const* const&, const T*> {
static const T* Get(T const* const& p) { return p; }
};
template <typename T>
struct PtrGetter<T*&, T*> {
static T* Get(T*& p) { return p; }
};
template <typename T>
struct PtrGetter<const std::unique_ptr<T>&, const T*> {
static const T* Get(const std::unique_ptr<T>& p) { return p.get(); }
};
template <typename T>
struct PtrGetter<std::unique_ptr<T>&, T*> {
static T* Get(std::unique_ptr<T>& p) { return p.get(); }
};
}
template <typename PointedToTy>
typename MappedPtrContainerSorter<PointedToTy>::UnmappedPtrIndexFn
MappedPtrContainerSorter<PointedToTy>::IndexBeforeMappedElementsFn() {
static const auto fn = [](const PointedToTy*) {
return IndexBeforeMappedElements();
};
return fn;
}
template <typename PointedToTy>
typename MappedPtrContainerSorter<PointedToTy>::UnmappedPtrIndexFn
MappedPtrContainerSorter<PointedToTy>::IndexAfterMappedElementsFn() {
static const auto fn = [](const PointedToTy*) {
return IndexAfterMappedElements();
};
return fn;
}
template <typename PointedToTy>
typename MappedPtrContainerSorter<PointedToTy>::UnmappedPtrIndexFn
MappedPtrContainerSorter<PointedToTy>::InvalidIndexFn() {
static const auto fn = [](const PointedToTy*) { return InvalidIndex(); };
return fn;
}
template <typename PointedToTy>
absl::Status
MappedPtrContainerSorter<PointedToTy>::SortedIndices::AddMappedElement(
size_t unordered_container_index, size_t partial_order) {
if (partial_order >= mapped_element_indices_by_partial_order_.size()) {
return InternalStrCat("invalid partial order: ", partial_order, " v max(",
mapped_element_indices_by_partial_order_.size(), ")");
}
mapped_element_indices_by_partial_order_[partial_order].push_back(
unordered_container_index);
return absl::OkStatus();
}
template <typename PointedToTy>
void MappedPtrContainerSorter<PointedToTy>::SortedIndices::AddUnmappedElement(
size_t unordered_container_index,
size_t target_index_amongst_mapped_elements) {
target_index_to_unmapped_element_index_[target_index_amongst_mapped_elements]
.push_back(unordered_container_index);
}
template <typename PointedToTy>
std::string MappedPtrContainerSorter<PointedToTy>::SortedIndices::ToString()
const {
std::vector<std::string> mapped_element_strs;
mapped_element_strs.reserve(mapped_element_indices_by_partial_order_.size());
for (const auto& indices : mapped_element_indices_by_partial_order_) {
mapped_element_strs.push_back(
absl::StrCat("[", absl::StrJoin(indices, ", "), "]"));
}
std::vector<std::string> unmapped_element_strs;
unmapped_element_strs.reserve(target_index_to_unmapped_element_index_.size());
for (const auto& kv : target_index_to_unmapped_element_index_) {
std::string key = absl::StrCat(kv.first);
if (kv.first == IndexBeforeMappedElements()) {
key = "before_mapped";
}
if (kv.first == IndexAfterMappedElements()) {
key = "after_mapped";
}
if (kv.first == InvalidIndex()) {
key = "invalid";
}
unmapped_element_strs.push_back(
absl::StrCat(key, ": [", absl::StrJoin(kv.second, ", "), "]"));
}
return absl::StrCat(
"max_partial_order_exclusive_: ", max_partial_order_exclusive_, "\n",
"unordered_container_size_: ", unordered_container_size_, "\n",
"mapped_element_indices_by_partial_order_: [",
absl::StrJoin(mapped_element_strs, ", "), "]\n",
"target_index_to_unmapped_element_index_: {",
absl::StrJoin(unmapped_element_strs, ", "), "}\n");
}
template <typename PointedToTy>
absl::StatusOr<std::vector<size_t>>
MappedPtrContainerSorter<PointedToTy>::SortedIndices::Flatten() const {
std::vector<size_t> result(unordered_container_size_, InvalidIndex());
size_t next_available_index = 0;
auto next_index_fn = [&]() -> absl::StatusOr<size_t> {
if (next_available_index >= unordered_container_size_) {
return InternalStrCat(
"invalid unordered_container index: ", next_available_index,
" v size(", unordered_container_size_, ")");
}
return next_available_index++;
};
if (target_index_to_unmapped_element_index_.contains(
IndexBeforeMappedElements())) {
const auto& indices =
target_index_to_unmapped_element_index_.at(IndexBeforeMappedElements());
for (size_t index : indices) {
TF_ASSIGN_OR_RETURN(result[index], next_index_fn());
}
}
size_t num_inserted_mapped_elements = 0;
for (const auto& mapped_element_indices :
mapped_element_indices_by_partial_order_) {
for (size_t mapped_element_index : mapped_element_indices) {
TF_ASSIGN_OR_RETURN(result[mapped_element_index], next_index_fn());
++num_inserted_mapped_elements;
if (target_index_to_unmapped_element_index_.contains(
num_inserted_mapped_elements - 1)) {
const auto& unmapped_element_indices =
target_index_to_unmapped_element_index_.at(
num_inserted_mapped_elements - 1);
for (size_t unmapped_element_index : unmapped_element_indices) {
TF_ASSIGN_OR_RETURN(result[unmapped_element_index], next_index_fn());
}
}
}
}
if (target_index_to_unmapped_element_index_.contains(
IndexAfterMappedElements())) {
const auto& indices =
target_index_to_unmapped_element_index_.at(IndexAfterMappedElements());
for (size_t index : indices) {
TF_ASSIGN_OR_RETURN(result[index], next_index_fn());
}
}
  // Validate the computed permutation: every destination index must be unique
  // and within bounds. Each index is recorded in used_indices so that the
  // duplicate check below can actually fire.
  absl::flat_hash_set<size_t> used_indices;
  for (size_t index : result) {
    if (used_indices.contains(index)) {
      return InternalStrCat(
          "2 elements in unordered_container are destined for the same "
          "index: ",
          index);
    }
    if (index >= unordered_container_size_) {
      return InvalidArgumentStrCat("invalid unordered_container index: ", index,
                                   " v size(", unordered_container_size_, ")");
    }
    used_indices.insert(index);
  }
return result;
}
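// Computes, for every element of unordered_container, the index it should
// move to. Mapped elements receive partial-order values in the order their
// counterparts appear in ordered_container (when one unordered element is
// mapped from several ordered elements, the earliest remaining value is
// consumed); unmapped elements are positioned via unmapped_index.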
template <typename PointedToTy>
template <typename OrderedTy, typename UnorderedTy>
absl::StatusOr<std::vector<size_t>>
MappedPtrContainerSorter<PointedToTy>::ComputeNewIndices(
MapPtrFn map_ptr, UnmappedPtrIndexFn unmapped_index,
const OrderedTy& ordered_container,
const UnorderedTy& unordered_container) {
using UnorderedPtrGetter = mapped_ptr_container_sorter_internal::PtrGetter<
typename UnorderedTy::const_reference, const PointedToTy*>;
using OrderedPtrGetter = mapped_ptr_container_sorter_internal::PtrGetter<
typename OrderedTy::const_reference, const PointedToTy*>;
if (unordered_container.size() >= IndexBeforeMappedElements()) {
return InvalidArgumentStrCat("Unordered container is too large to sort.");
}
absl::flat_hash_set<const PointedToTy*> unordered_ptrs;
for (const auto& unordered_element : unordered_container) {
const PointedToTy* ptr = UnorderedPtrGetter::Get(unordered_element);
unordered_ptrs.insert(ptr);
}
absl::flat_hash_map<const PointedToTy*, std::list<size_t>>
mapped_ptr_to_partial_order;
size_t next_partial_order_value = 0;
for (const auto& ordered_element : ordered_container) {
const PointedToTy* ordered_ptr = OrderedPtrGetter::Get(ordered_element);
const PointedToTy* unordered_ptr = map_ptr(ordered_ptr);
if (!unordered_ptr) {
continue;
}
if (!unordered_ptrs.contains(unordered_ptr)) {
continue;
}
mapped_ptr_to_partial_order[unordered_ptr].push_back(
next_partial_order_value);
++next_partial_order_value;
}
SortedIndices result(next_partial_order_value, unordered_container.size());
for (size_t i = 0; i < unordered_container.size(); ++i) {
const PointedToTy* ptr = UnorderedPtrGetter::Get(unordered_container[i]);
if (!mapped_ptr_to_partial_order.contains(ptr)) {
result.AddUnmappedElement(i, unmapped_index(ptr));
continue;
}
auto& index_list = mapped_ptr_to_partial_order[ptr];
TF_RETURN_IF_ERROR(result.AddMappedElement(i, index_list.front()));
if (index_list.size() > 1) {
index_list.pop_front();
}
}
VLOG(5) << "Pre flatten unordered_container result:\n" << result.ToString();
return result.Flatten();
}
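// Applies the permutation produced by ComputeNewIndices in place using swaps:
// each iteration either advances past an element already in its final
// position or swaps the current element toward its destination.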
template <typename PointedToTy>
template <typename UnorderedTy>
void MappedPtrContainerSorter<PointedToTy>::Reorder(
std::vector<size_t> new_indices, UnorderedTy& unordered_container) {
size_t old_pos = 0;
while (old_pos < new_indices.size()) {
size_t new_pos = new_indices[old_pos];
if (old_pos == new_pos) {
++old_pos;
continue;
}
std::swap(new_indices[old_pos], new_indices[new_pos]);
std::swap(unordered_container[old_pos], unordered_container[new_pos]);
}
}
template <typename PointedToTy>
template <typename OrderedTy, typename UnorderedTy>
absl::Status MappedPtrContainerSorter<PointedToTy>::Sort(
MapPtrFn map_ptr, UnmappedPtrIndexFn unmapped_index,
const OrderedTy& ordered_container, UnorderedTy& unordered_container) {
std::vector<size_t> indices;
TF_ASSIGN_OR_RETURN(
indices, ComputeNewIndices(map_ptr, unmapped_index, ordered_container,
unordered_container));
Reorder(std::move(indices), unordered_container);
return absl::OkStatus();
}
}
#endif | #include "xla/service/mapped_ptr_container_sorter.h"
#include <cstddef>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/bind_front.h"
#include "absl/log/log.h"
#include "xla/test.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::Pointee;
std::vector<std::unique_ptr<std::string>> CreateUniquePtrContainer(
const std::vector<std::string>& values) {
std::vector<std::unique_ptr<std::string>> container;
for (auto value : values) {
container.push_back(std::make_unique<std::string>(value));
}
return container;
}
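// Naming convention used throughout these tests: elements named "m*" have a
// counterpart in the ordered container (mapped), while elements named "u*"
// do not (unmapped).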
class MappedPtrContainerSorterTest : public ::testing::Test {
public:
using Sorter = MappedPtrContainerSorter<std::string>;
MappedPtrContainerSorterTest()
: ordered_unique_ptrs_(CreateUniquePtrContainer(
{"m0", "m1", "m2", "m3", "not_in_unordered"})),
unordered_unique_ptrs_(
CreateUniquePtrContainer({"m3", "m1", "m0", "m2"})) {
for (auto& unique : ordered_unique_ptrs_) {
ordered_raw_ptrs_.push_back(unique.get());
ordered_const_raw_ptrs_.push_back(unique.get());
}
for (auto& unique : unordered_unique_ptrs_) {
unordered_raw_ptrs_.push_back(unique.get());
unordered_const_raw_ptrs_.push_back(unique.get());
}
}
protected:
const std::string* MapPtr(const std::string* ordered) const {
for (size_t i = 0; i < unordered_unique_ptrs_.size(); ++i) {
if (*ordered == *unordered_unique_ptrs_[i]) {
return unordered_unique_ptrs_[i].get();
}
}
return nullptr;
}
auto MapPtrFn() const {
return absl::bind_front(&MappedPtrContainerSorterTest::MapPtr, this);
}
void AddUnmappedElementsToUnorderedUniquePtrs() {
unordered_unique_ptrs_.insert(unordered_unique_ptrs_.begin(),
std::make_unique<std::string>("u0"));
unordered_unique_ptrs_.insert(unordered_unique_ptrs_.begin() + 2,
std::make_unique<std::string>("u1"));
unordered_unique_ptrs_.insert(unordered_unique_ptrs_.begin() + 3,
std::make_unique<std::string>("u2"));
unordered_unique_ptrs_.insert(unordered_unique_ptrs_.end(),
std::make_unique<std::string>("u3"));
}
std::vector<std::unique_ptr<std::string>> ordered_unique_ptrs_;
std::vector<std::unique_ptr<std::string>> unordered_unique_ptrs_;
std::vector<std::string*> ordered_raw_ptrs_;
std::vector<std::string*> unordered_raw_ptrs_;
std::vector<const std::string*> ordered_const_raw_ptrs_;
std::vector<const std::string*> unordered_const_raw_ptrs_;
};
TEST_F(MappedPtrContainerSorterTest, SortUniquePtrs) {
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::InvalidIndexFn(),
ordered_unique_ptrs_, unordered_unique_ptrs_));
EXPECT_THAT(
unordered_unique_ptrs_,
ElementsAre(Pointee(std::string("m0")), Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest, RawPtrs) {
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::InvalidIndexFn(),
ordered_raw_ptrs_, unordered_raw_ptrs_));
EXPECT_THAT(
unordered_raw_ptrs_,
ElementsAre(Pointee(std::string("m0")), Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest, ConstRawPtrs) {
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::InvalidIndexFn(),
ordered_const_raw_ptrs_,
unordered_const_raw_ptrs_));
EXPECT_THAT(
unordered_const_raw_ptrs_,
ElementsAre(Pointee(std::string("m0")), Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest, DifferentContainerTypes) {
std::list<std::unique_ptr<std::string>> ordered_ptrs;
for (auto& ptr : ordered_unique_ptrs_) {
ordered_ptrs.push_back(std::move(ptr));
}
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::InvalidIndexFn(), ordered_ptrs,
unordered_unique_ptrs_));
EXPECT_THAT(
unordered_unique_ptrs_,
ElementsAre(Pointee(std::string("m0")), Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest, WithUnmappedPtrsAfterMappedPtrs) {
AddUnmappedElementsToUnorderedUniquePtrs();
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::IndexAfterMappedElementsFn(),
ordered_unique_ptrs_, unordered_unique_ptrs_));
EXPECT_THAT(
unordered_unique_ptrs_,
ElementsAre(Pointee(std::string("m0")), Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3")),
Pointee(std::string("u0")), Pointee(std::string("u1")),
Pointee(std::string("u2")), Pointee(std::string("u3"))));
}
TEST_F(MappedPtrContainerSorterTest, WithUnmappedPtrsBeforeMappedPtrs) {
AddUnmappedElementsToUnorderedUniquePtrs();
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::IndexBeforeMappedElementsFn(),
ordered_unique_ptrs_, unordered_unique_ptrs_));
EXPECT_THAT(unordered_unique_ptrs_,
ElementsAre(
Pointee(std::string("u0")), Pointee(std::string("u1")),
Pointee(std::string("u2")), Pointee(std::string("u3")),
Pointee(std::string("m0")), Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest, WithUnmappedPtrsInCustomLocations) {
auto unmapped_ptr_index = [](const std::string* s) -> size_t {
if (*s == "u0") {
return Sorter::IndexAfterMappedElementsFn()(s);
}
if (*s == "u1") {
return 2;
}
if (*s == "u2") {
return 2;
}
if (*s == "u3") {
return Sorter::IndexBeforeMappedElementsFn()(s);
}
LOG(FATAL) << "We should not be getting an unmapped ptr index for " << *s;
};
AddUnmappedElementsToUnorderedUniquePtrs();
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), unmapped_ptr_index,
ordered_unique_ptrs_, unordered_unique_ptrs_));
EXPECT_THAT(
unordered_unique_ptrs_,
ElementsAre(
Pointee(std::string("u3")),
Pointee(std::string("m0")),
Pointee(std::string("m1")),
Pointee(std::string("m2")),
Pointee(std::string("u1")),
Pointee(std::string("u2")),
Pointee(std::string("m3")),
Pointee(std::string("u0"))
));
}
TEST_F(MappedPtrContainerSorterTest,
ManyOrderedElementsMapToFewUnorderedElements) {
std::string* ordered_m1 = nullptr;
for (auto ptr : ordered_raw_ptrs_) {
if (*ptr == "m1") {
ordered_m1 = ptr;
break;
}
}
ASSERT_NE(ordered_m1, nullptr);
std::string* unordered_m1 = nullptr;
for (auto ptr : unordered_raw_ptrs_) {
if (*ptr == "m1") {
unordered_m1 = ptr;
break;
}
}
ASSERT_NE(unordered_m1, nullptr);
ordered_raw_ptrs_.insert(ordered_raw_ptrs_.begin(), ordered_m1);
ordered_raw_ptrs_.push_back(ordered_m1);
unordered_raw_ptrs_.push_back(unordered_m1);
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::IndexBeforeMappedElementsFn(),
ordered_raw_ptrs_, unordered_raw_ptrs_));
EXPECT_THAT(
unordered_raw_ptrs_,
ElementsAre(
Pointee(std::string("m1")),
Pointee(std::string("m0")),
Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest,
FewOrderedElementsMapToManyUnorderedElements) {
std::string* ordered_m1 = nullptr;
for (auto ptr : ordered_raw_ptrs_) {
if (*ptr == "m1") {
ordered_m1 = ptr;
break;
}
}
ASSERT_NE(ordered_m1, nullptr);
std::string* unordered_m1 = nullptr;
for (auto ptr : unordered_raw_ptrs_) {
if (*ptr == "m1") {
unordered_m1 = ptr;
break;
}
}
ASSERT_NE(unordered_m1, nullptr);
ordered_raw_ptrs_.insert(ordered_raw_ptrs_.begin(), ordered_m1);
unordered_raw_ptrs_.push_back(unordered_m1);
unordered_raw_ptrs_.push_back(unordered_m1);
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::IndexBeforeMappedElementsFn(),
ordered_raw_ptrs_, unordered_raw_ptrs_));
EXPECT_THAT(
unordered_raw_ptrs_,
ElementsAre(
Pointee(std::string("m1")),
Pointee(std::string("m0")),
Pointee(std::string("m1")),
Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest, InvalidUnmappedIndex) {
unordered_unique_ptrs_.push_back(std::make_unique<std::string>("u0"));
auto unmapped_index_fn = [](const std::string* unmapped) -> size_t {
if (*unmapped == "u0") {
return 4;
}
return Sorter::IndexBeforeMappedElementsFn()(unmapped);
};
EXPECT_FALSE(Sorter::Sort(MapPtrFn(), unmapped_index_fn, ordered_unique_ptrs_,
unordered_unique_ptrs_)
.ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/mapped_ptr_container_sorter.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/mapped_ptr_container_sorter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e27221c5-0b3b-45d4-8e65-4e62b0ab5c84 | cpp | tensorflow/tensorflow | fuzzy_matcher | third_party/xla/xla/service/fuzzy_matcher.h | third_party/xla/xla/service/fuzzy_matcher_test.cc | #ifndef XLA_SERVICE_FUZZY_MATCHER_H_
#define XLA_SERVICE_FUZZY_MATCHER_H_
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/pattern_matcher.h"
namespace xla {
namespace fm {
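// Fuzzy matchers mirror the factories in xla::match but tolerate an optional
// convert wrapped around the matched instruction: OptConvert(pattern) accepts
// either `pattern` itself or `convert(pattern)`. For example (a sketch based
// on the accompanying test), fm::Divide(p0, p1) matches both a divide and a
// convert of a divide.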
template <typename Pattern>
auto OptConvert(Pattern pattern) {
auto shared = match::SharedSubpattern(pattern);
return match::AnyOf<HloInstruction>(match::Convert(shared), shared);
}
#define XLA_FUZZY_UNOP_PATTERN(NAME) \
template <typename HloInstructionType> \
inline auto NAME(HloInstructionType** matched_inst) { \
return OptConvert(match::Op(matched_inst).WithOpcode(HloOpcode::k##NAME)); \
} \
\
template <typename Arg> \
inline auto NAME(Arg&& arg) { \
return OptConvert(match::Op() \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg>(arg))); \
} \
\
template <typename HloInstructionType, typename Arg> \
inline auto NAME(HloInstructionType** matched_inst, Arg&& arg) { \
return OptConvert(match::Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg>(arg))); \
}
XLA_FUZZY_UNOP_PATTERN(Tanh)
XLA_FUZZY_UNOP_PATTERN(Exp)
XLA_FUZZY_UNOP_PATTERN(Broadcast)
#undef XLA_FUZZY_UNOP_PATTERN
#define XLA_FUZZY_BINOP_PATTERN(NAME) \
template <typename HloInstructionType, typename Lhs, typename Rhs> \
inline auto NAME(HloInstructionType** matched_inst, Lhs&& lhs, Rhs&& rhs) { \
return OptConvert(match::Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Lhs>(lhs)) \
.WithOperand(1, std::forward<Rhs>(rhs))); \
} \
template <typename Lhs, typename Rhs> \
inline auto NAME(Lhs&& lhs, Rhs&& rhs) { \
return OptConvert(match::Op() \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Lhs>(lhs)) \
.WithOperand(1, std::forward<Rhs>(rhs))); \
}
XLA_FUZZY_BINOP_PATTERN(Dot)
XLA_FUZZY_BINOP_PATTERN(Divide)
XLA_FUZZY_BINOP_PATTERN(Subtract)
XLA_FUZZY_BINOP_PATTERN(Multiply)
XLA_FUZZY_BINOP_PATTERN(Reduce)
#undef XLA_FUZZY_BINOP_PATTERN
#define XLA_FUZZY_TERNOP_PATTERN(NAME) \
template <typename Arg0, typename Arg1, typename Arg2> \
inline auto NAME(Arg0&& arg0, Arg1&& arg1, Arg2&& arg2) { \
return OptConvert(match::Op() \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg0>(arg0)) \
.WithOperand(1, std::forward<Arg1>(arg1)) \
.WithOperand(2, std::forward<Arg2>(arg2))); \
} \
\
template <typename HloInstructionType, typename Arg0, typename Arg1, \
typename Arg2> \
inline auto NAME(HloInstructionType** matched_inst, Arg0&& arg0, \
Arg1&& arg1, Arg2&& arg2) { \
return OptConvert(match::Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg0>(arg0)) \
.WithOperand(1, std::forward<Arg1>(arg1)) \
.WithOperand(2, std::forward<Arg2>(arg2))); \
}
XLA_FUZZY_TERNOP_PATTERN(Select);
#undef XLA_FUZZY_TERNOP_PATTERN
}
}
#endif | #include "xla/service/fuzzy_matcher.h"
#include <gtest/gtest.h>
#include "xla/service/pattern_matcher.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using FuzzyMatcherTest = HloTestBase;
TEST_F(FuzzyMatcherTest, IgnoreConvert) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
x = f16[8,3] parameter(0)
y = f16[8,3] parameter(1)
div = f16[8,3] divide(x, y)
ROOT convert = f32[8,3] convert(div)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(
Match(root, fm::Divide(match::Parameter(0), match::Parameter(1))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/fuzzy_matcher.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/fuzzy_matcher_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e71d581e-3184-4873-831f-aba1c7d47594 | cpp | tensorflow/tensorflow | hlo_rematerialization_test_utils | third_party/xla/xla/service/hlo_rematerialization_test_utils.h | third_party/xla/xla/service/hlo_rematerialization_test_utils_test.cc | #ifndef XLA_SERVICE_HLO_REMATERIALIZATION_TEST_UTILS_H_
#define XLA_SERVICE_HLO_REMATERIALIZATION_TEST_UTILS_H_
#include <cstdint>
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
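// Test fixture that builds small computations dominated by a large
// (1024-element) broadcast, making them convenient targets for
// rematerialization tests: passes can choose to recompute the broadcast from
// a scalar rather than keep it live.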
class RematerializationTestBase : public HloTestBase {
protected:
std::unique_ptr<HloComputation> MakeRematerializableComputation(
const std::string& suffix = "") {
auto builder = HloComputation::Builder(TestName() + suffix);
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
auto reshape = builder.AddInstruction(
HloInstruction::CreateReshape(scalar_shape_, param));
auto bcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(vec1024_shape_, reshape, {}));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, bcast));
auto concat_1 = builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(xla::F32, {2048}), {negate, negate},
0));
auto slice_1 = builder.AddInstruction(HloInstruction::CreateSlice(
vec1_shape_, concat_1, {0},
{1},
{1}));
auto concat_2 = builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(xla::F32, {1025}), {bcast, slice_1},
0));
builder.AddInstruction(HloInstruction::CreateSlice(vec1_shape_, concat_2,
{0},
{1},
{1}));
return builder.Build();
}
std::unique_ptr<HloComputation> MakeRematerializableWhileComputation(
HloComputation* while_cond, HloComputation* while_body,
const std::string& suffix = "") {
auto builder = HloComputation::Builder(TestName() + suffix);
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
auto reshape = builder.AddInstruction(
HloInstruction::CreateReshape(scalar_shape_, param));
auto bcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(vec1024_shape_, reshape, {}));
auto slice_1 = builder.AddInstruction(
HloInstruction::CreateSlice(vec1_shape_, bcast, {0},
{1},
{1}));
auto while_inst = builder.AddInstruction(HloInstruction::CreateWhile(
vec1_shape_, while_cond, while_body, slice_1));
auto concat = builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(xla::F32, {1025}), {bcast, while_inst},
0));
builder.AddInstruction(HloInstruction::CreateSlice(vec1_shape_, concat,
{0},
{1},
{1}));
return builder.Build();
}
std::unique_ptr<HloComputation> MakeConditionComputation() {
auto builder = HloComputation::Builder(TestName() + ".cond");
builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
return builder.Build();
}
static int64_t ByteSizeOf(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, sizeof(void*));
}
protected:
const Shape scalar_shape_ = ShapeUtil::MakeShape(xla::F32, {});
const Shape vec1_shape_ = ShapeUtil::MakeShape(xla::F32, {1});
const Shape vec1024_shape_ = ShapeUtil::MakeShape(xla::F32, {1024});
};
}
#endif | #include "xla/service/hlo_rematerialization_test_utils.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
namespace {
using ::testing::UnorderedElementsAre;
class HloRematerializationTestUtilsTest : public RematerializationTestBase {};
TEST_F(HloRematerializationTestUtilsTest, MakeRematerializableComputation) {
auto computation = MakeRematerializableComputation();
std::vector<HloInstruction*> instructions(computation->instructions().begin(),
computation->instructions().end());
EXPECT_EQ(instructions[0]->name(), "param");
EXPECT_EQ(instructions[1]->name(), "reshape");
EXPECT_THAT(instructions[1]->operands(),
UnorderedElementsAre(instructions[0]));
EXPECT_EQ(instructions[2]->name(), "broadcast");
EXPECT_THAT(instructions[2]->operands(),
UnorderedElementsAre(instructions[1]));
EXPECT_EQ(instructions[3]->name(), "negate");
EXPECT_THAT(instructions[3]->operands(),
UnorderedElementsAre(instructions[2]));
EXPECT_EQ(instructions[4]->name(), "concatenate");
EXPECT_THAT(instructions[4]->operands(),
UnorderedElementsAre(instructions[3], instructions[3]));
EXPECT_EQ(instructions[5]->name(), "slice");
EXPECT_THAT(instructions[5]->operands(),
UnorderedElementsAre(instructions[4]));
EXPECT_EQ(instructions[6]->name(), "concatenate");
EXPECT_THAT(instructions[6]->operands(),
UnorderedElementsAre(instructions[2], instructions[5]));
EXPECT_EQ(instructions[7]->name(), "slice");
EXPECT_THAT(instructions[7]->operands(),
UnorderedElementsAre(instructions[6]));
}
TEST_F(HloRematerializationTestUtilsTest,
MakeRematerializableWhileComputation) {
auto while_condition = MakeConditionComputation();
auto body_computation = MakeRematerializableComputation();
auto computation = MakeRematerializableWhileComputation(
while_condition.get(), body_computation.get());
std::vector<HloInstruction*> instructions(computation->instructions().begin(),
computation->instructions().end());
EXPECT_EQ(instructions[0]->name(), "param");
EXPECT_EQ(instructions[1]->name(), "reshape");
EXPECT_THAT(instructions[1]->operands(),
UnorderedElementsAre(instructions[0]));
EXPECT_EQ(instructions[2]->name(), "broadcast");
EXPECT_THAT(instructions[2]->operands(),
UnorderedElementsAre(instructions[1]));
EXPECT_EQ(instructions[3]->name(), "slice");
EXPECT_THAT(instructions[3]->operands(),
UnorderedElementsAre(instructions[2]));
EXPECT_EQ(instructions[4]->name(), "while");
EXPECT_THAT(instructions[4]->operands(),
UnorderedElementsAre(instructions[3]));
EXPECT_EQ(instructions[4]->while_condition()->name(),
"MakeRematerializableWhileComputation.cond");
EXPECT_EQ(instructions[4]->while_body()->name(),
"MakeRematerializableWhileComputation");
EXPECT_EQ(instructions[5]->name(), "concatenate");
EXPECT_THAT(instructions[5]->operands(),
UnorderedElementsAre(instructions[2], instructions[4]));
EXPECT_EQ(instructions[6]->name(), "slice");
EXPECT_THAT(instructions[6]->operands(),
UnorderedElementsAre(instructions[5]));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_rematerialization_test_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_rematerialization_test_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c2752c74-3af7-4654-8232-6b113fc0920d | cpp | tensorflow/tensorflow | pattern_matcher_gmock | third_party/xla/xla/service/pattern_matcher_gmock.h | third_party/xla/xla/service/pattern_matcher_gmock_test.cc | #ifndef XLA_SERVICE_PATTERN_MATCHER_GMOCK_H_
#define XLA_SERVICE_PATTERN_MATCHER_GMOCK_H_
#include <ostream>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/layout.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/test.h"
#include "tsl/platform/test.h"
namespace xla {
namespace pattern_matcher_gmock_detail {
template <typename Pattern>
class GmockMatcher {
public:
explicit GmockMatcher(Pattern p) : pattern_(std::move(p)) {}
bool MatchAndExplain(const Layout& l,
::testing::MatchResultListener* listener) const {
return MatchAndExplainImpl(&l, listener);
}
bool MatchAndExplain(const Layout* l,
::testing::MatchResultListener* listener) const {
return MatchAndExplainImpl(l, listener);
}
bool MatchAndExplain(Layout* l,
::testing::MatchResultListener* listener) const {
return MatchAndExplainImpl(l, listener);
}
bool MatchAndExplain(const Shape& s,
::testing::MatchResultListener* listener) const {
return MatchAndExplainImpl(&s, listener);
}
bool MatchAndExplain(const Shape* s,
::testing::MatchResultListener* listener) const {
return MatchAndExplainImpl(s, listener);
}
bool MatchAndExplain(Shape* s,
::testing::MatchResultListener* listener) const {
return MatchAndExplainImpl(s, listener);
}
bool MatchAndExplain(const HloInstruction& instr,
::testing::MatchResultListener* listener) const {
return MatchAndExplainImpl(&instr, listener);
}
bool MatchAndExplain(const HloInstruction* instr,
::testing::MatchResultListener* listener) const {
return MatchAndExplainImpl(instr, listener);
}
bool MatchAndExplain(HloInstruction* instr,
::testing::MatchResultListener* listener) const {
return MatchAndExplainImpl(instr, listener);
}
void DescribeTo(std::ostream* os) const { pattern_.DescribeTo(os); }
void DescribeNegationTo(std::ostream* os) const {
*os << "is NOT: ";
DescribeTo(os);
}
private:
template <typename T>
bool MatchAndExplainImpl(T* t,
::testing::MatchResultListener* listener) const {
MatchOption options{true, false,
listener->stream()};
return Match(t, pattern_, options);
}
Pattern pattern_;
};
}
template <typename Pattern>
::testing::PolymorphicMatcher<
pattern_matcher_gmock_detail::GmockMatcher<Pattern>>
GmockMatch(Pattern&& p) {
return ::testing::MakePolymorphicMatcher(
pattern_matcher_gmock_detail::GmockMatcher<Pattern>(
std::forward<Pattern>(p)));
}
}
#endif | #include "xla/service/pattern_matcher_gmock.h"
#include <sstream>
#include <string>
#include <type_traits>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::testing::Not;
template <typename MatchedTy>
std::string Describe(const ::testing::Matcher<MatchedTy>& m) {
std::stringstream ss;
m.DescribeTo(&ss);
return ss.str();
}
template <typename MatchedTy>
std::string Explain(
const MatchedTy& val,
const ::testing::Matcher<typename std::remove_cv<MatchedTy>::type>& m) {
::testing::StringMatchResultListener listener;
EXPECT_THAT(val, ::testing::Not(m));
EXPECT_FALSE(m.MatchAndExplain(val, &listener));
return listener.str();
}
TEST(PatternMatcherGmock, MatchShape) {
Shape s = ShapeUtil::MakeShape(F32, {10, 100});
EXPECT_THAT(s, GmockMatch(m::Shape()));
EXPECT_THAT(&s, Not(GmockMatch(m::Shape().WithElementType(F16))));
EXPECT_THAT(Describe<Shape>(GmockMatch(m::Shape().IsArray())),
"a shape that represents an array");
}
TEST(PatternMatcherGmock, MatchLayout) {
Layout l = LayoutUtil::MakeLayout({0, 1});
EXPECT_THAT(l, GmockMatch(m::Layout()));
}
TEST(PatternMatchGmock, MatchInstruction) {
auto instr =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {42}), "p");
EXPECT_THAT(instr.get(), GmockMatch(m::Parameter()));
EXPECT_THAT(*instr, GmockMatch(m::Parameter(0)));
EXPECT_THAT(*instr, Not(GmockMatch(m::Parameter(1))));
EXPECT_THAT(Describe<HloInstruction*>(GmockMatch(m::Parameter())),
"an HloInstruction with opcode parameter");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/pattern_matcher_gmock.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/pattern_matcher_gmock_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
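// Illustrative sketch (not part of the dataset record above): because GmockMatch
// returns an ordinary polymorphic gMock matcher, it composes with combinators
// such as testing::AllOf and testing::Not. The test name below is invented.
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"

namespace pattern_matcher_gmock_sketch {
namespace m = ::xla::match;

TEST(PatternMatcherGmockSketch, ComposesWithOtherMatchers) {
  xla::Shape s = xla::ShapeUtil::MakeShape(xla::F32, {2, 3});
  EXPECT_THAT(s, ::testing::AllOf(
                     xla::GmockMatch(m::Shape().IsArray()),
                     ::testing::Not(xla::GmockMatch(
                         m::Shape().WithElementType(xla::F16)))));
}
}  // namespace pattern_matcher_gmock_sketch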
df40c77a-7062-4e92-a882-a8f6fe66ce8f | cpp | tensorflow/tensorflow | lockable | third_party/xla/xla/service/lockable.h | third_party/xla/xla/service/lockable_test.cc | #ifndef XLA_SERVICE_LOCKABLE_H_
#define XLA_SERVICE_LOCKABLE_H_
#include <string>
#include "absl/base/thread_annotations.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "tsl/platform/logging.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
template <typename T>
struct LockableName {
static std::string ToString(const T& value) {
return absl::StrFormat("lockable %p", &value);
}
};
template <typename T, typename LockableName = LockableName<T>>
class Lockable {
public:
class Lock {
public:
Lock() = default;
Lock(Lock&& other) {
lockable_ = other.lockable_;
other.lockable_ = nullptr;
}
Lock& operator=(Lock&& other) {
lockable_ = other.lockable_;
other.lockable_ = nullptr;
return *this;
}
~Lock() {
if (lockable_) lockable_->Release();
}
T& operator*() const { return lockable_->value_; }
T* operator->() const { return &lockable_->value_; }
operator bool() const { return lockable_ != nullptr; }
std::string ToString() const {
return lockable_ ? lockable_->ToString() : "<empty lock>";
}
private:
friend class Lockable;
explicit Lock(Lockable* lockable) : lockable_(lockable) {}
Lockable* lockable_ = nullptr;
};
Lockable() = default;
explicit Lockable(T value) : value_(std::move(value)) {
VLOG(2) << "Constructed " << LockableName::ToString(value_);
}
template <typename... Args>
explicit Lockable(Args&&... args) : value_(std::forward<Args>(args)...) {
VLOG(2) << "Constructed " << LockableName::ToString(value_);
}
Lockable(const Lockable&) = delete;
Lockable& operator=(const Lockable&) = delete;
~Lockable() {
VLOG(2) << "Destroy " << LockableName::ToString(value_);
absl::MutexLock lock(&mutex_);
CHECK_EQ(is_unlocked_, true);
}
Lock Acquire() {
tsl::profiler::TraceMe trace([&] {
return tsl::profiler::TraceMeEncode("Lockable::Lock::Acquire",
{{"lockable", ToString()}});
});
absl::MutexLock lock(&mutex_);
mutex_.Await(absl::Condition(&is_unlocked_));
VLOG(2) << "Acquired " << LockableName::ToString(value_);
is_unlocked_ = false;
return Lock(this);
}
Lock TryAcquire() {
absl::MutexLock lock(&mutex_);
if (is_unlocked_ == false) {
VLOG(2) << "Failed to acquire " << LockableName::ToString(value_);
return Lock();
}
VLOG(2) << "Acquired " << LockableName::ToString(value_);
is_unlocked_ = false;
return Lock(this);
}
std::string ToString() const { return LockableName::ToString(value_); }
protected:
const T& value() const { return value_; }
private:
friend class Lock;
void Release() {
absl::MutexLock lock(&mutex_);
VLOG(2) << "Released " << LockableName::ToString(value_);
CHECK(!is_unlocked_);
is_unlocked_ = true;
}
T value_;
absl::Mutex mutex_;
bool is_unlocked_ ABSL_GUARDED_BY(mutex_) = true;
};
}
#endif | #include "xla/service/lockable.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/synchronization/blocking_counter.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla {
tsl::thread::ThreadPool CreateThreadPool(int32_t size) {
return tsl::thread::ThreadPool(tsl::Env::Default(), "lockable_test", size);
}
template <>
struct LockableName<std::string> {
static std::string ToString(const std::string& str) {
return "lockable string " + str;
}
};
class LockableString : public Lockable<std::string> {
using Lockable::Lockable;
};
TEST(LockableTest, LockProperties) {
LockableString::Lock lock0;
EXPECT_FALSE(lock0);
LockableString str("foo");
LockableString::Lock lock1 = str.Acquire();
EXPECT_TRUE(lock1);
LockableString::Lock lock2 = std::move(lock1);
EXPECT_FALSE(lock1);
EXPECT_TRUE(lock2);
LockableString::Lock lock3 = str.TryAcquire();
EXPECT_FALSE(lock3);
EXPECT_EQ(lock1.ToString(), "<empty lock>");
EXPECT_EQ(lock2.ToString(), "lockable string foo");
EXPECT_EQ(str.ToString(), "lockable string foo");
auto sink = [](LockableString::Lock) {};
sink(std::move(lock2));
LockableString::Lock lock4 = str.TryAcquire();
EXPECT_TRUE(lock4);
}
TEST(LockableTest, ExclusiveAccess) {
absl::BlockingCounter counter(100);
auto thread_pool = CreateThreadPool(10);
LockableString str("foo");
for (size_t i = 0; i < 100; ++i) {
thread_pool.Schedule([&] {
{
auto exclusive_str = str.Acquire();
ASSERT_EQ(*exclusive_str, "foo");
}
counter.DecrementCount();
});
}
counter.Wait();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/lockable.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/lockable_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
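// Illustrative sketch (not part of the dataset record above): exclusive access
// can be handed to a callee by moving the Lock; the lock is released when that
// Lock object is destroyed. Counter, LockableCounter, Bump and Demo are
// invented names, and the LockableName specialization mirrors the test above.
#include <string>
#include "xla/service/lockable.h"

namespace lockable_sketch {
struct Counter {
  int value = 0;
};
}  // namespace lockable_sketch

namespace xla {
template <>
struct LockableName<lockable_sketch::Counter> {
  static std::string ToString(const lockable_sketch::Counter&) {
    return "lockable counter";
  }
};
}  // namespace xla

namespace lockable_sketch {

class LockableCounter : public xla::Lockable<Counter> {
 public:
  using Lockable::Lockable;
};

void Bump(LockableCounter::Lock lock) {
  // The callee holds the lock for the duration of the call; Acquire() from
  // other threads blocks until this Lock is destroyed.
  lock->value += 1;
}

void Demo() {
  LockableCounter counter(Counter{});
  Bump(counter.Acquire());            // the lock moves into Bump, released on return
  auto again = counter.TryAcquire();  // succeeds: nothing holds the lock now
}

}  // namespace lockable_sketch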
1636930c-ab5c-49b8-9201-f3ad61c62b54 | cpp | tensorflow/tensorflow | ordered_set | third_party/xla/xla/service/graphcycles/ordered_set.h | third_party/xla/xla/service/graphcycles/ordered_set_test.cc | #ifndef XLA_SERVICE_GRAPHCYCLES_ORDERED_SET_H_
#define XLA_SERVICE_GRAPHCYCLES_ORDERED_SET_H_
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "tsl/platform/logging.h"
namespace xla {
template <typename T>
class OrderedSet {
public:
bool Insert(T value) {
bool new_insertion =
value_to_index_.insert({value, value_sequence_.size()}).second;
if (new_insertion) {
value_sequence_.push_back(value);
}
return new_insertion;
}
void Erase(T value) {
auto it = value_to_index_.find(value);
DCHECK(it != value_to_index_.end());
value_to_index_[value_sequence_.back()] = it->second;
std::swap(value_sequence_[it->second], value_sequence_.back());
value_sequence_.pop_back();
value_to_index_.erase(it);
}
void Reserve(size_t new_size) {
value_to_index_.reserve(new_size);
value_sequence_.reserve(new_size);
}
void Clear() {
value_to_index_.clear();
value_sequence_.clear();
}
bool Contains(T value) const { return value_to_index_.contains(value); }
size_t Size() const { return value_sequence_.size(); }
absl::Span<T const> GetSequence() const { return value_sequence_; }
private:
std::vector<T> value_sequence_;
absl::flat_hash_map<T, int> value_to_index_;
};
}
#endif | #include "xla/service/graphcycles/ordered_set.h"
#include <array>
#include "tsl/platform/test.h"
namespace xla {
namespace {
TEST(OrderedSetTest, Insert) {
OrderedSet<int> ordered_set;
EXPECT_TRUE(ordered_set.Insert(90));
EXPECT_TRUE(ordered_set.Insert(100));
EXPECT_TRUE(ordered_set.Insert(80));
EXPECT_FALSE(ordered_set.Insert(100));
EXPECT_EQ(ordered_set.Size(), 3);
EXPECT_TRUE(ordered_set.Contains(90));
EXPECT_TRUE(ordered_set.Contains(100));
EXPECT_TRUE(ordered_set.Contains(80));
EXPECT_FALSE(ordered_set.Contains(40));
std::array<int, 3> expected_sequence = {90, 100, 80};
EXPECT_EQ(ordered_set.GetSequence(), expected_sequence);
}
TEST(OrderedSetTest, Erase) {
OrderedSet<int> ordered_set;
EXPECT_TRUE(ordered_set.Insert(90));
EXPECT_TRUE(ordered_set.Insert(100));
EXPECT_TRUE(ordered_set.Insert(80));
ordered_set.Erase(100);
EXPECT_EQ(ordered_set.Size(), 2);
EXPECT_TRUE(ordered_set.Contains(90));
EXPECT_FALSE(ordered_set.Contains(100));
EXPECT_TRUE(ordered_set.Contains(80));
std::array<int, 2> expected_sequence_0 = {90, 80};
EXPECT_EQ(ordered_set.GetSequence(), expected_sequence_0);
ordered_set.Erase(80);
EXPECT_EQ(ordered_set.Size(), 1);
EXPECT_TRUE(ordered_set.Contains(90));
EXPECT_FALSE(ordered_set.Contains(100));
EXPECT_FALSE(ordered_set.Contains(80));
std::array<int, 1> expected_sequence_1 = {90};
EXPECT_EQ(ordered_set.GetSequence(), expected_sequence_1);
ordered_set.Erase(90);
EXPECT_EQ(ordered_set.Size(), 0);
EXPECT_FALSE(ordered_set.Contains(90));
EXPECT_FALSE(ordered_set.Contains(100));
EXPECT_FALSE(ordered_set.Contains(80));
std::array<int, 0> expected_sequence_2 = {};
EXPECT_EQ(ordered_set.GetSequence(), expected_sequence_2);
}
TEST(OrderedSetTest, Clear) {
OrderedSet<int> ordered_set;
EXPECT_TRUE(ordered_set.Insert(90));
EXPECT_TRUE(ordered_set.Insert(100));
EXPECT_TRUE(ordered_set.Insert(80));
ordered_set.Clear();
EXPECT_EQ(ordered_set.Size(), 0);
EXPECT_FALSE(ordered_set.Contains(90));
EXPECT_FALSE(ordered_set.Contains(100));
EXPECT_FALSE(ordered_set.Contains(80));
std::array<int, 0> expected_sequence = {};
EXPECT_EQ(ordered_set.GetSequence(), expected_sequence);
}
TEST(OrderedSetTest, LargeInsertions) {
const int kSize = 50 * 9000;
OrderedSet<int> ordered_set;
for (int i = 0; i < kSize; i++) {
EXPECT_TRUE(ordered_set.Insert(i + 500));
}
for (int i = 0; i < kSize; i++) {
EXPECT_EQ(ordered_set.GetSequence()[i], i + 500);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/graphcycles/ordered_set.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/graphcycles/ordered_set_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
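// Illustrative sketch (not part of the dataset record above): Erase() swaps the
// erased slot with the last element, so insertion order is only preserved for
// elements before the erased position. OrderedSetEraseSketch is an invented name.
#include "xla/service/graphcycles/ordered_set.h"

void OrderedSetEraseSketch() {
  xla::OrderedSet<int> set;
  set.Insert(1);
  set.Insert(2);
  set.Insert(3);
  set.Insert(4);
  // GetSequence() is {1, 2, 3, 4}, in insertion order.
  set.Erase(2);
  // The last element (4) was swapped into the erased slot, so the sequence is
  // now {1, 4, 3} rather than {1, 3, 4}.
}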
37e3ea92-5e0b-42f0-a7ac-7eaa650395c4 | cpp | tensorflow/tensorflow | scoped_module_handle | third_party/xla/xla/stream_executor/scoped_module_handle.h | third_party/xla/xla/stream_executor/scoped_module_handle_test.cc | #ifndef XLA_STREAM_EXECUTOR_SCOPED_MODULE_HANDLE_H_
#define XLA_STREAM_EXECUTOR_SCOPED_MODULE_HANDLE_H_
#include "absl/log/check.h"
#include "xla/stream_executor/module_spec.h"
#include "xla/stream_executor/stream_executor.h"
namespace stream_executor {
class ScopedModuleHandle {
public:
ScopedModuleHandle(StreamExecutor* executor, ModuleHandle module_handle)
: executor_(executor), module_handle_(module_handle) {}
ScopedModuleHandle(ScopedModuleHandle&& other) {
executor_ = other.executor_;
module_handle_ = other.module_handle_;
other.executor_ = nullptr;
other.module_handle_ = ModuleHandle();
}
ScopedModuleHandle& operator=(ScopedModuleHandle&& other) {
executor_ = other.executor_;
module_handle_ = other.module_handle_;
other.executor_ = nullptr;
other.module_handle_ = ModuleHandle();
return *this;
}
~ScopedModuleHandle() {
if (static_cast<bool>(module_handle_)) {
CHECK(executor_->UnloadModule(module_handle_));
}
}
private:
StreamExecutor* executor_;
ModuleHandle module_handle_;
ScopedModuleHandle(const ScopedModuleHandle&) = delete;
void operator=(const ScopedModuleHandle&) = delete;
};
}
#endif | #include "xla/stream_executor/scoped_module_handle.h"
#include <utility>
#include "xla/stream_executor/mock_stream_executor.h"
#include "xla/stream_executor/module_spec.h"
#include "tsl/platform/test.h"
using testing::Return;
namespace stream_executor {
namespace {
TEST(ScopedModuleHandleTest, NoUnloadForNullHandle) {
ModuleHandle foo;
MockStreamExecutor executor;
EXPECT_CALL(executor, UnloadModule).Times(0);
{
ScopedModuleHandle first(&executor, foo);
ScopedModuleHandle second = std::move(first);
ScopedModuleHandle third(&executor, foo);
third = std::move(second);
}
}
TEST(ScopedModuleHandleTest, NonNullHandleUnloadsOnceAfterMoves) {
ModuleHandle foo(reinterpret_cast<void*>(1));
MockStreamExecutor executor;
EXPECT_CALL(executor, UnloadModule).WillOnce(Return(true));
{
ScopedModuleHandle first(&executor, foo);
ScopedModuleHandle second = std::move(first);
ScopedModuleHandle third(&executor, ModuleHandle{});
third = std::move(second);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/scoped_module_handle.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/scoped_module_handle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
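// Illustrative sketch (not part of the dataset record above): the handle is
// unloaded exactly once, by whichever ScopedModuleHandle ends up owning it
// after moves; a default-constructed ModuleHandle is never unloaded. The
// `executor` and `handle` arguments are assumed to come from a prior
// module-loading call.
#include <utility>
#include "xla/stream_executor/module_spec.h"
#include "xla/stream_executor/scoped_module_handle.h"
#include "xla/stream_executor/stream_executor.h"

void ScopedModuleHandleSketch(stream_executor::StreamExecutor* executor,
                              stream_executor::ModuleHandle handle) {
  stream_executor::ScopedModuleHandle first(executor, handle);
  // Ownership moves to `second`; `first` no longer unloads anything.
  stream_executor::ScopedModuleHandle second = std::move(first);
}  // `second` is destroyed here and, if `handle` is non-null, unloads it once.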
e94eef46-9cde-4093-b44d-8e89d991ead3 | cpp | tensorflow/tensorflow | gpu_kernel | third_party/xla/xla/stream_executor/gpu/gpu_kernel.h | third_party/xla/xla/stream_executor/gpu/gpu_kernel_test.cc | #ifndef XLA_STREAM_EXECUTOR_GPU_GPU_KERNEL_H_
#define XLA_STREAM_EXECUTOR_GPU_GPU_KERNEL_H_
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/kernel.h"
namespace stream_executor::gpu {
class GpuKernel : public Kernel {
public:
virtual GpuFunctionHandle gpu_function() const = 0;
};
inline const GpuKernel* AsGpuKernel(const Kernel* kernel) {
return static_cast<const GpuKernel*>(kernel);
}
inline GpuKernel* AsGpuKernel(Kernel* kernel) {
return static_cast<GpuKernel*>(kernel);
}
}
#endif | #include <cstdint>
#include <memory>
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_test_kernels.h"
#include "xla/stream_executor/gpu/gpu_test_kernels_fatbin.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
using AddI32Kernel =
TypedKernelFactory<DeviceMemory<int32_t>, DeviceMemory<int32_t>,
DeviceMemory<int32_t>>;
class GpuKernelTest : public ::testing::Test {
public:
void SetUp() override {
auto name = absl::AsciiStrToUpper(
xla::PlatformUtil::CanonicalPlatformName("gpu").value());
Platform* platform = PlatformManager::PlatformWithName(name).value();
executor_ = platform->ExecutorForDevice(0).value();
}
void RunAddI32Kernel(const MultiKernelLoaderSpec& spec) {
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor_->CreateStream());
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor_, spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> a = executor_->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor_->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor_->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
ASSERT_TRUE(
stream->ThenLaunch(ThreadDim(), BlockDim(4), add, a, b, c).ok());
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected = {3, 3, 3, 3};
ASSERT_EQ(dst, expected);
}
StreamExecutor* executor_;
};
TEST_F(GpuKernelTest, LoadAndRunKernelFromPtx) {
if (executor_->GetPlatform()->id() ==
stream_executor::rocm::kROCmPlatformId) {
GTEST_SKIP() << "There is no PTX or any equivalent abstraction for ROCm.";
}
MultiKernelLoaderSpec spec(3);
spec.AddCudaPtxInMemory(internal::kAddI32KernelPtx, "AddI32");
RunAddI32Kernel(spec);
}
TEST_F(GpuKernelTest, LoadAndRunKernelFromCubin) {
MultiKernelLoaderSpec spec(3);
TF_ASSERT_OK_AND_ASSIGN(auto binary, GetGpuTestKernelsFatbin());
spec.AddCudaCubinInMemory(binary, "AddI32");
RunAddI32Kernel(spec);
}
TEST_F(GpuKernelTest, LoadAndRunKernelFromSymbol) {
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
RunAddI32Kernel(spec);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_kernel.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4a03d074-abd5-4eff-b24a-ddc438954def | cpp | tensorflow/tensorflow | context_map | third_party/xla/xla/stream_executor/gpu/context_map.h | third_party/xla/xla/stream_executor/gpu/context_map_test.cc | #ifndef XLA_STREAM_EXECUTOR_GPU_CONTEXT_MAP_H_
#define XLA_STREAM_EXECUTOR_GPU_CONTEXT_MAP_H_
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/synchronization/mutex.h"
namespace stream_executor::gpu {
template <class GpuContext, class ContextType>
class ContextMap {
public:
explicit ContextMap(absl::AnyInvocable<int(void* ptr)> find_device_ordinal)
: find_device_ordinal_(std::move(find_device_ordinal)) {}
bool Has(GpuContext context) {
absl::ReaderMutexLock lock(&mutex_);
return gpu_context_to_context_type_map_.find(context) !=
gpu_context_to_context_type_map_.end();
}
ContextType* Add(GpuContext context, int device_ordinal) {
CHECK_NE(context, nullptr);
absl::MutexLock lock(&mutex_);
auto insert_result = gpu_context_to_context_type_map_.insert(
std::make_pair(context, nullptr));
auto it = insert_result.first;
if (insert_result.second) {
it->second = std::make_unique<ContextType>(context, device_ordinal);
ordinal_to_type_map_[device_ordinal].push_back(context);
}
return it->second.get();
}
void Remove(GpuContext context) {
absl::MutexLock lock(&mutex_);
CHECK_NE(context, nullptr);
auto it = gpu_context_to_context_type_map_.find(context);
CHECK(it != gpu_context_to_context_type_map_.end()) << context;
gpu_context_to_context_type_map_.erase(it);
    for (auto& p : ordinal_to_type_map_) {
auto it2 = std::find(p.second.begin(), p.second.end(), context);
if (it2 != p.second.end()) {
p.second.erase(it2);
if (p.second.empty()) {
ordinal_to_type_map_.erase(p.first);
}
break;
}
}
}
GpuContext GetAnyContext(void* ptr) {
absl::ReaderMutexLock lock(&mutex_);
int device_ordinal = find_device_ordinal_(ptr);
CHECK_EQ(ordinal_to_type_map_.count(device_ordinal), 1);
CHECK(!ordinal_to_type_map_.at(device_ordinal).empty())
<< "Need at least one context.";
return ordinal_to_type_map_.at(device_ordinal)[0];
}
private:
absl::Mutex mutex_;
absl::flat_hash_map<GpuContext, std::unique_ptr<ContextType>>
gpu_context_to_context_type_map_ ABSL_GUARDED_BY(mutex_);
absl::flat_hash_map<int, std::vector<GpuContext>> ordinal_to_type_map_
ABSL_GUARDED_BY(mutex_);
absl::AnyInvocable<int(void* ptr)> find_device_ordinal_;
};
}
#endif | #include "xla/stream_executor/gpu/context_map.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
class TestContext {
public:
TestContext(void *context, int device_ordinal)
: context_(context), device_ordinal_(device_ordinal) {}
void *context() const { return context_; }
int device_ordinal() const { return device_ordinal_; }
private:
void *context_;
int device_ordinal_;
};
TEST(ContextMapTest, AddRemoveAndHasWorks) {
int device_ordinal = 1;
void *context = &device_ordinal;
auto ordinal_finder = [device_ordinal](void *ptr) { return device_ordinal; };
ContextMap<void *, TestContext> map(ordinal_finder);
auto *test_context = map.Add(context, device_ordinal);
EXPECT_EQ(test_context->context(), context);
EXPECT_EQ(test_context->device_ordinal(), device_ordinal);
EXPECT_TRUE(map.Has(context));
map.Remove(context);
EXPECT_FALSE(map.Has(context));
}
TEST(ContextMapTest, AddTwiceReturnsSameContext) {
void *context = reinterpret_cast<void *>(2);
constexpr int device_ordinal = 1;
auto ordinal_finder = [](void *ptr) { return device_ordinal; };
ContextMap<void *, TestContext> map(ordinal_finder);
auto *test_context1 = map.Add(context, device_ordinal);
auto *test_context2 = map.Add(context, device_ordinal);
EXPECT_EQ(test_context1, test_context2);
}
TEST(ContextMapTest, GetAnyContextReturnsCorrectContext) {
void *context1 = reinterpret_cast<void *>(2);
void *context2 = reinterpret_cast<void *>(3);
constexpr int device_ordinal1 = 1;
constexpr int device_ordinal2 = 2;
auto ordinal_finder = [](void *ptr) {
static int calls = 0;
++calls;
if (calls <= 1) {
return device_ordinal1;
} else {
return device_ordinal2;
}
};
ContextMap<void *, TestContext> map(ordinal_finder);
auto *test_context1 = map.Add(context1, device_ordinal1);
auto *test_context2 = map.Add(context2, device_ordinal2);
EXPECT_NE(test_context1, test_context2);
auto first_context = map.GetAnyContext(context1);
EXPECT_EQ(first_context, context1);
auto second_context = map.GetAnyContext(context2);
EXPECT_EQ(second_context, context2);
}
TEST(ContextMapTest, GetAnyContextShouldDieWithBadInput) {
void *context1 = reinterpret_cast<void *>(2);
void *context2 = reinterpret_cast<void *>(3);
constexpr int device_ordinal1 = 1;
constexpr int device_ordinal2 = 2;
auto ordinal_finder = [](void *ptr) {
static int calls = 0;
++calls;
if (calls <= 1) {
return device_ordinal1;
} else {
return device_ordinal2;
}
};
ContextMap<void *, TestContext> map(ordinal_finder);
auto *test_context1 = map.Add(context1, device_ordinal1);
auto *test_context2 = map.Add(context2, device_ordinal2);
EXPECT_NE(test_context1, test_context2);
auto first_context = map.GetAnyContext(context1);
EXPECT_EQ(first_context, context1);
auto second_context = map.GetAnyContext(context2);
EXPECT_EQ(second_context, context2);
map.Remove(context2);
EXPECT_DEATH(map.GetAnyContext(context2), "Check failed");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/context_map.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/context_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
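// Illustrative sketch (not part of the dataset record above): the constructor
// callback maps a raw pointer to a device ordinal, and GetAnyContext() returns
// the first context Add()ed for that ordinal. FakeContext and the other names
// below are invented for the sketch.
#include "xla/stream_executor/gpu/context_map.h"

namespace context_map_sketch {

struct FakeContext {
  FakeContext(void* handle, int ordinal) : handle(handle), ordinal(ordinal) {}
  void* handle;
  int ordinal;
};

void Demo() {
  // Pretend every pointer belongs to device 0.
  stream_executor::gpu::ContextMap<void*, FakeContext> map(
      [](void* ptr) { return 0; });
  int dummy = 0;
  void* ctx = &dummy;
  FakeContext* wrapped = map.Add(ctx, /*device_ordinal=*/0);
  CHECK_EQ(wrapped->ordinal, 0);
  // Any pointer that the callback resolves to ordinal 0 yields the context
  // registered above.
  CHECK_EQ(map.GetAnyContext(&dummy), ctx);
  map.Remove(ctx);
}

}  // namespace context_map_sketch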
8971d4cb-bee3-478d-b5af-bb826df19571 | cpp | tensorflow/tensorflow | gpu_executor | third_party/xla/xla/stream_executor/gpu/gpu_executor.h | third_party/xla/xla/stream_executor/gpu/gpu_executor_test.cc | #ifndef XLA_STREAM_EXECUTOR_GPU_GPU_EXECUTOR_H_
#define XLA_STREAM_EXECUTOR_GPU_GPU_EXECUTOR_H_
#include <cstdint>
#include <memory>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/event_based_timer.h"
#include "xla/stream_executor/gpu/context.h"
#include "xla/stream_executor/host_memory_allocation.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_common.h"
namespace stream_executor {
namespace gpu {
class GpuStream;
class GpuExecutor : public StreamExecutorCommon {
public:
GpuExecutor(Platform* platform, int device_ordinal)
: StreamExecutorCommon(platform),
context_(nullptr),
device_ordinal_(device_ordinal) {}
int device_ordinal() const override { return device_ordinal_; };
virtual void UnloadKernel(const Kernel* kernel) = 0;
virtual absl::StatusOr<std::unique_ptr<EventBasedTimer>>
CreateEventBasedTimer(GpuStream* stream, bool use_delay_kernel) = 0;
virtual absl::Status TrimGraphMemory() = 0;
Context* gpu_context() const { return context_; }
absl::StatusOr<std::vector<ApiTrace>> ExtractApiTrace() override {
absl::MutexLock lock(&logger_mu_);
return std::move(argument_logs_);
}
absl::Status RecordApiTrace(ApiTrace call) override {
absl::MutexLock lock(&logger_mu_);
if (std::holds_alternative<GemmCallTrace>(call) &&
(argument_logging_mode_ & kLogGemm)) {
argument_logs_.push_back(call);
}
return absl::OkStatus();
}
bool SetArgumentLoggingMode(uint64_t mode) override {
absl::MutexLock lock(&logger_mu_);
argument_logging_mode_ = mode;
return true;
}
uint64_t GetArgumentLoggingMode() const { return argument_logging_mode_; }
protected:
void set_context(Context* context) { context_ = context; }
private:
Context* context_;
int device_ordinal_;
absl::Mutex logger_mu_;
mutable std::vector<ApiTrace> argument_logs_ ABSL_GUARDED_BY(logger_mu_);
uint64_t argument_logging_mode_ = 0;
GpuExecutor(const GpuExecutor&) = delete;
void operator=(const GpuExecutor&) = delete;
};
inline GpuExecutor* ExtractGpuExecutor(StreamExecutor* stream_exec) {
return static_cast<GpuExecutor*>(stream_exec);
}
}
}
#endif | #include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
class GpuExecutorTest : public testing::Test {
public:
Platform* GetPlatform() {
auto name = absl::AsciiStrToLower(
xla::PlatformUtil::CanonicalPlatformName("gpu").value());
return PlatformManager::PlatformWithName(name).value();
}
};
using GetPointerMemorySpaceTest = GpuExecutorTest;
TEST_F(GetPointerMemorySpaceTest, Host) {
StreamExecutor* executor = GetPlatform()->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto host_ptr, executor->HostMemoryAllocate(64));
TF_ASSERT_OK_AND_ASSIGN(auto memory_space,
executor->GetPointerMemorySpace(host_ptr->opaque()))
EXPECT_EQ(memory_space, MemoryType::kHost);
}
TEST_F(GetPointerMemorySpaceTest, Device) {
StreamExecutor* executor = GetPlatform()->ExecutorForDevice(0).value();
auto mem = executor->Allocate(64);
ASSERT_NE(mem, nullptr);
TF_ASSERT_OK_AND_ASSIGN(auto memory_space,
executor->GetPointerMemorySpace(mem.opaque()))
EXPECT_EQ(memory_space, MemoryType::kDevice);
executor->Deallocate(&mem);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_executor.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e1e05b95-3168-49b8-bae6-0e35ebebf454 | cpp | tensorflow/tensorflow | ptx_compiler | third_party/xla/xla/stream_executor/cuda/ptx_compiler.h | third_party/xla/xla/stream_executor/cuda/ptx_compiler_test.cc | #ifndef XLA_STREAM_EXECUTOR_CUDA_PTX_COMPILER_H_
#define XLA_STREAM_EXECUTOR_CUDA_PTX_COMPILER_H_
#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/semantic_version.h"
namespace stream_executor {
absl::StatusOr<std::vector<uint8_t>> CompileGpuAsmUsingLibNvPtxCompiler(
int cc_major, int cc_minor, const char* ptx_contents, GpuAsmOpts options,
bool cancel_if_reg_spill);
absl::StatusOr<SemanticVersion> GetLibNvPtxCompilerVersion();
}
#endif | #include "xla/stream_executor/cuda/ptx_compiler.h"
#include <sys/types.h>
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/cuda/ptx_compiler_support.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/semantic_version.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace {
constexpr const char kSpillingPtx[] = R"(
.version 8.0
.target sm_52
.address_size 64
.visible .entry _Z6kernelPi(
.param .u64 _Z6kernelPi_param_0
)
.maxnreg 16
{
.reg .b32 %r<33>;
.reg .b64 %rd<3>;
ld.param.u64 %rd1, [_Z6kernelPi_param_0];
cvta.to.global.u64 %rd2, %rd1;
ld.global.u32 %r1, [%rd2+4];
ld.global.u32 %r2, [%rd2+8];
ld.global.u32 %r3, [%rd2+12];
ld.global.u32 %r4, [%rd2+16];
ld.global.u32 %r5, [%rd2+20];
ld.global.u32 %r6, [%rd2+24];
ld.global.u32 %r7, [%rd2+28];
ld.global.u32 %r8, [%rd2+32];
ld.global.u32 %r9, [%rd2+36];
ld.global.u32 %r10, [%rd2+40];
ld.global.u32 %r11, [%rd2+44];
ld.global.u32 %r12, [%rd2+48];
ld.global.u32 %r13, [%rd2+52];
ld.global.u32 %r14, [%rd2+56];
ld.global.u32 %r15, [%rd2+60];
add.s32 %r16, %r15, 15;
st.global.u32 [%rd2+60], %r16;
add.s32 %r17, %r14, 15;
st.global.u32 [%rd2+56], %r17;
add.s32 %r18, %r13, 15;
st.global.u32 [%rd2+52], %r18;
add.s32 %r19, %r12, 15;
st.global.u32 [%rd2+48], %r19;
add.s32 %r20, %r11, 15;
st.global.u32 [%rd2+44], %r20;
add.s32 %r21, %r10, 15;
st.global.u32 [%rd2+40], %r21;
add.s32 %r22, %r9, 15;
st.global.u32 [%rd2+36], %r22;
add.s32 %r23, %r8, 15;
st.global.u32 [%rd2+32], %r23;
add.s32 %r24, %r7, 15;
st.global.u32 [%rd2+28], %r24;
add.s32 %r25, %r6, 15;
st.global.u32 [%rd2+24], %r25;
add.s32 %r26, %r5, 15;
st.global.u32 [%rd2+20], %r26;
add.s32 %r27, %r4, 15;
st.global.u32 [%rd2+16], %r27;
add.s32 %r28, %r3, 15;
st.global.u32 [%rd2+12], %r28;
add.s32 %r29, %r2, 15;
st.global.u32 [%rd2+8], %r29;
add.s32 %r30, %r1, 15;
st.global.u32 [%rd2+4], %r30;
ld.global.u32 %r31, [%rd2];
add.s32 %r32, %r31, 15;
st.global.u32 [%rd2], %r32;
ret;
}
)";
constexpr const char kSimplePtx[] = R"(
.version 8.0
.target sm_52
.address_size 64
.visible .entry _Z6kernelPi (
.param .u64 _Z6kernelPi_param_0
)
{
.reg .b32 %r<16>;
.reg .b64 %rd<3>;
ld.param.u64 %rd1, [_Z6kernelPi_param_0];
cvta.to.global.u64 %rd2, %rd1;
mov.u32 %r1, 42;
st.global.u32 [%rd2], %r15;
ret;
})";
constexpr stream_executor::CudaComputeCapability kDefaultComputeCapability{5,
2};
absl::StatusOr<std::vector<uint8_t>> CompileHelper(
stream_executor::CudaComputeCapability cc, const char* const ptx_input,
bool disable_gpuasm_optimizations = false, bool cancel_if_reg_spill = false,
std::vector<std::string> extra_flags = {}) {
stream_executor::GpuAsmOpts options(disable_gpuasm_optimizations,
"", extra_flags);
return stream_executor::CompileGpuAsmUsingLibNvPtxCompiler(
cc.major, cc.minor, ptx_input, options, cancel_if_reg_spill);
}
class PtxCompilerTest : public ::testing::Test {
void SetUp() override {
if (!stream_executor::IsLibNvPtxCompilerSupported()) {
GTEST_SKIP();
}
}
};
TEST_F(PtxCompilerTest, IdentifiesUnsupportedArchitecture) {
EXPECT_THAT(
CompileHelper(stream_executor::CudaComputeCapability{100, 0}, kSimplePtx),
tsl::testing::StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_F(PtxCompilerTest, CanCompileSingleCompilationUnit) {
EXPECT_THAT(CompileHelper(kDefaultComputeCapability, kSimplePtx),
tsl::testing::IsOk());
}
TEST_F(PtxCompilerTest, CancelsOnRegSpill) {
EXPECT_THAT(CompileHelper(kDefaultComputeCapability, kSpillingPtx,
true,
true),
tsl::testing::StatusIs(absl::StatusCode::kCancelled));
EXPECT_THAT(CompileHelper(kDefaultComputeCapability, kSpillingPtx,
true,
false),
tsl::testing::IsOk());
}
TEST_F(PtxCompilerTest, AcceptsExtraArguments) {
auto reference_cubin = CompileHelper(kDefaultComputeCapability, kSimplePtx,
false,
false, {});
auto cubin_with_line_info =
CompileHelper(kDefaultComputeCapability, kSimplePtx,
false,
false, {"--generate-line-info"});
EXPECT_THAT(reference_cubin, tsl::testing::IsOk());
EXPECT_THAT(cubin_with_line_info, tsl::testing::IsOk());
EXPECT_GT(cubin_with_line_info->size(), reference_cubin->size());
EXPECT_THAT(
CompileHelper(kDefaultComputeCapability, kSimplePtx,
false,
false, {"--flag-does-not-exist"}),
tsl::testing::StatusIs(absl::StatusCode::kInternal));
}
TEST_F(PtxCompilerTest, ReturnsReasonableVersion) {
constexpr stream_executor::SemanticVersion kMinSupportedVersion = {12, 0, 0};
EXPECT_THAT(stream_executor::GetLibNvPtxCompilerVersion(),
tsl::testing::IsOkAndHolds(testing::Ge(kMinSupportedVersion)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/ptx_compiler.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/ptx_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
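// Illustrative sketch (not part of the dataset record above): invoking the
// libnvptxcompiler entry point directly, mirroring CompileHelper above. The
// CompileForSm52 name is invented and `ptx` is assumed to be supplied by the
// caller; compute capability 5.2 matches the PTX constants in the test.
#include <cstdint>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/cuda/ptx_compiler.h"
#include "xla/stream_executor/cuda/ptx_compiler_support.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"

absl::StatusOr<std::vector<uint8_t>> CompileForSm52(const char* ptx) {
  if (!stream_executor::IsLibNvPtxCompilerSupported()) {
    return absl::UnimplementedError("libnvptxcompiler is not linked in");
  }
  stream_executor::GpuAsmOpts options{};
  // cancel_if_reg_spill=true turns register spilling into a Cancelled status,
  // as exercised by the CancelsOnRegSpill test above.
  return stream_executor::CompileGpuAsmUsingLibNvPtxCompiler(
      /*cc_major=*/5, /*cc_minor=*/2, ptx, options,
      /*cancel_if_reg_spill=*/true);
}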
82253c7c-0a81-4089-ae97-3fa38916f40c | cpp | tensorflow/tensorflow | nvjitlink | third_party/xla/xla/stream_executor/cuda/nvjitlink.h | third_party/xla/xla/stream_executor/cuda/nvjitlink_test.cc | #ifndef XLA_STREAM_EXECUTOR_CUDA_NVJITLINK_H_
#define XLA_STREAM_EXECUTOR_CUDA_NVJITLINK_H_
#include <cstdint>
#include <tuple>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
namespace stream_executor {
using NvJitLinkVersion = std::tuple<unsigned, unsigned>;
absl::StatusOr<NvJitLinkVersion> GetNvJitLinkVersion();
struct NvJitLinkInput {
enum class Type { kPtx, kCubin };
Type type;
absl::Span<const uint8_t> bytes;
};
absl::StatusOr<std::vector<uint8_t>> CompileAndLinkUsingLibNvJitLink(
int cc_major, int cc_minor, absl::Span<const NvJitLinkInput> inputs,
GpuAsmOpts options, bool cancel_if_reg_spill);
}
#endif | #include "xla/stream_executor/cuda/nvjitlink.h"
#include <sys/types.h>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_replace.h"
#include "absl/types/span.h"
#include "xla/stream_executor/cuda/nvjitlink_support.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace {
constexpr const char kDependeePtx[] = R"(
.version 8.0
.target sm_52
.address_size 64
.visible .func (.param .b32 func_retval0) _Z5magicv()
{
.reg .b32 %r<2>;
mov.u32 %r1, 42;
st.param.b32 [func_retval0+0], %r1;
ret;
})";
constexpr const char kDependentPtx[] = R"(
.version 8.0
.target sm_52
.address_size 64
.extern .func (.param .b32 func_retval0) _Z5magicv
()
;
.visible .entry _Z6kernelPi(
.param .u64 _Z6kernelPi_param_0
)
{
.reg .b32 %r<2>;
.reg .b64 %rd<3>;
ld.param.u64 %rd1, [_Z6kernelPi_param_0];
cvta.to.global.u64 %rd2, %rd1;
{
.reg .b32 temp_param_reg;
.param .b32 retval0;
call.uni (retval0),
_Z5magicv,
(
);
ld.param.b32 %r1, [retval0+0];
}
st.global.u32 [%rd2], %r1;
ret;
})";
constexpr const char kStandalonePtx[] = R"(
.version 8.0
.target sm_52
.address_size 64
.visible .entry _Z6kernelPi (
.param .u64 _Z6kernelPi_param_0
)
{
.reg .b32 %r<16>;
.reg .b64 %rd<3>;
ld.param.u64 %rd1, [_Z6kernelPi_param_0];
cvta.to.global.u64 %rd2, %rd1;
mov.u32 %r1, 42;
st.global.u32 [%rd2], %r15;
ret;
})";
constexpr stream_executor::CudaComputeCapability kDefaultComputeCapability{5,
2};
auto CompileAndLinkHelper(stream_executor::CudaComputeCapability cc,
absl::Span<const char* const> ptx_inputs,
bool disable_gpuasm_optimizations = false,
bool cancel_if_reg_spill = false) {
std::vector<stream_executor::NvJitLinkInput> inputs;
inputs.reserve(ptx_inputs.size());
for (const char* ptx_input : ptx_inputs) {
inputs.emplace_back(stream_executor::NvJitLinkInput{
stream_executor::NvJitLinkInput::Type::kPtx,
absl::Span<const uint8_t>{reinterpret_cast<const uint8_t*>(ptx_input),
std::strlen(ptx_input) + 1}});
}
stream_executor::GpuAsmOpts options{};
options.disable_gpuasm_optimizations = disable_gpuasm_optimizations;
return stream_executor::CompileAndLinkUsingLibNvJitLink(
cc.major, cc.minor, inputs, options, cancel_if_reg_spill);
}
class NvJitLinkTest : public ::testing::Test {
void SetUp() override {
if (!stream_executor::IsLibNvJitLinkSupported()) {
GTEST_SKIP();
}
}
};
TEST_F(NvJitLinkTest, GetVersion) {
EXPECT_THAT(stream_executor::GetNvJitLinkVersion(),
tsl::testing::IsOkAndHolds(
testing::Ge(stream_executor::NvJitLinkVersion{12, 0})));
}
TEST_F(NvJitLinkTest, IdentifiesUnsupportedArchitecture) {
EXPECT_THAT(
CompileAndLinkHelper(stream_executor::CudaComputeCapability{100, 0},
{kStandalonePtx}),
tsl::testing::StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_F(NvJitLinkTest, LinkingTwoCompilationUnitsSucceeds) {
EXPECT_THAT(CompileAndLinkHelper(kDefaultComputeCapability,
{kDependentPtx, kDependeePtx}),
tsl::testing::IsOk());
}
TEST_F(NvJitLinkTest, LinkingFailsWhenDependeeIsMissing) {
EXPECT_THAT(CompileAndLinkHelper(kDefaultComputeCapability, {kDependentPtx}),
tsl::testing::StatusIs(absl::StatusCode::kUnknown));
}
TEST_F(NvJitLinkTest, CanAlsoJustCompileSingleCompilationUnit) {
EXPECT_THAT(CompileAndLinkHelper(kDefaultComputeCapability, {kStandalonePtx}),
tsl::testing::IsOk());
}
TEST_F(NvJitLinkTest, CancelsOnRegSpill) {
std::string dependent_ptx = absl::StrReplaceAll(
kDependentPtx, {{"
EXPECT_THAT(CompileAndLinkHelper(kDefaultComputeCapability,
{dependent_ptx.c_str(), kDependeePtx},
true,
true),
tsl::testing::StatusIs(absl::StatusCode::kCancelled));
EXPECT_THAT(CompileAndLinkHelper(kDefaultComputeCapability,
{dependent_ptx.c_str(), kDependeePtx},
true,
false),
tsl::testing::IsOk());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/nvjitlink.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/nvjitlink_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
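// Illustrative sketch (not part of the dataset record above): linking an
// already-compiled cubin together with a PTX translation unit in one call.
// LinkCubinWithPtx is an invented name; `cubin` and `ptx` are assumed to be
// provided by the caller. PTX bytes include the trailing NUL, as in
// CompileAndLinkHelper above.
#include <cstdint>
#include <cstring>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/stream_executor/cuda/nvjitlink.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"

absl::StatusOr<std::vector<uint8_t>> LinkCubinWithPtx(
    absl::Span<const uint8_t> cubin, const char* ptx) {
  std::vector<stream_executor::NvJitLinkInput> inputs;
  inputs.push_back({stream_executor::NvJitLinkInput::Type::kCubin, cubin});
  inputs.push_back({stream_executor::NvJitLinkInput::Type::kPtx,
                    absl::Span<const uint8_t>(
                        reinterpret_cast<const uint8_t*>(ptx),
                        std::strlen(ptx) + 1)});
  stream_executor::GpuAsmOpts options{};
  return stream_executor::CompileAndLinkUsingLibNvJitLink(
      /*cc_major=*/5, /*cc_minor=*/2, inputs, options,
      /*cancel_if_reg_spill=*/false);
}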
167d38bf-d218-48de-bf28-3f9bb553562d | cpp | tensorflow/tensorflow | stringpiece | tensorflow/core/lib/core/stringpiece.h | third_party/xla/third_party/tsl/tsl/platform/stringpiece_test.cc | #ifndef TENSORFLOW_CORE_LIB_CORE_STRINGPIECE_H_
#define TENSORFLOW_CORE_LIB_CORE_STRINGPIECE_H_
#include "tensorflow/core/platform/stringpiece.h"
#endif | #include "tsl/platform/stringpiece.h"
#include <unordered_map>
#include "tsl/platform/test.h"
namespace tsl {
TEST(StringPiece, Ctor) {
{
const char* hello = "hello";
absl::string_view s20(hello);
EXPECT_TRUE(s20.data() == hello);
EXPECT_EQ(5, s20.size());
absl::string_view s21(hello, 4);
EXPECT_TRUE(s21.data() == hello);
EXPECT_EQ(4, s21.size());
absl::string_view s22(hello, 6);
EXPECT_TRUE(s22.data() == hello);
EXPECT_EQ(6, s22.size());
}
{
string hola = "hola";
absl::string_view s30(hola);
EXPECT_TRUE(s30.data() == hola.data());
EXPECT_EQ(4, s30.size());
hola.push_back('\0');
hola.append("h2");
hola.push_back('\0');
absl::string_view s31(hola);
EXPECT_TRUE(s31.data() == hola.data());
EXPECT_EQ(8, s31.size());
}
}
TEST(StringPiece, ConversionToString) {
EXPECT_EQ("", string(absl::string_view("")));
EXPECT_EQ("foo", string(absl::string_view("foo")));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/stringpiece.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/stringpiece_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c004a04a-45b9-41d5-9ca1-80bf5198bb3d | cpp | tensorflow/tensorflow | intrusive_ptr | tensorflow/core/platform/intrusive_ptr.h | third_party/xla/third_party/tsl/tsl/platform/intrusive_ptr_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_INTRUSIVE_PTR_H_
#define TENSORFLOW_CORE_PLATFORM_INTRUSIVE_PTR_H_
#include <algorithm>
#include "tsl/platform/intrusive_ptr.h"
namespace tensorflow {
namespace core {
template <class T>
using IntrusivePtr = tsl::core::IntrusivePtr<T>;
}
}
#endif | #include "tsl/platform/intrusive_ptr.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace core {
namespace {
TEST(IntrusivePtr, ConstructorAddRefFalse) {
auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
ASSERT_TRUE(ptr->RefCountIsOne());
}
TEST(IntrusivePtr, ConstructorAddRefTrue) {
auto raw = new RefCounted();
auto ptr = IntrusivePtr<RefCounted>(raw, true);
ASSERT_FALSE(raw->RefCountIsOne());
raw->Unref();
ASSERT_TRUE(raw->RefCountIsOne());
}
TEST(IntrusivePtr, CopyConstructor) {
auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto ptr2 = IntrusivePtr<RefCounted>(ptr1);
ASSERT_FALSE(ptr2->RefCountIsOne());
}
TEST(IntrusivePtr, CopyAssignment) {
auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto raw = new RefCounted();
auto ptr2 = IntrusivePtr<RefCounted>(raw, true);
ptr2 = ptr1;
ASSERT_EQ(ptr1.get(), ptr2.get());
ASSERT_FALSE(ptr2->RefCountIsOne());
ASSERT_TRUE(raw->RefCountIsOne());
raw->Unref();
}
TEST(IntrusivePtr, CopyAssignmentIntoEmpty) {
auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto ptr2 = IntrusivePtr<RefCounted>();
ptr2 = ptr1;
ASSERT_FALSE(ptr2->RefCountIsOne());
}
TEST(IntrusivePtr, MoveConstructor) {
auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto ptr2 = IntrusivePtr<RefCounted>(std::move(ptr1));
ASSERT_TRUE(ptr2->RefCountIsOne());
ASSERT_EQ(ptr1.get(), nullptr);
}
TEST(IntrusivePtr, MoveAssignment) {
auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto ptr2 = IntrusivePtr<RefCounted>(new RefCounted(), false);
ptr2 = std::move(ptr1);
ASSERT_TRUE(ptr2->RefCountIsOne());
ASSERT_EQ(ptr1.get(), nullptr);
}
TEST(IntrusivePtr, MoveAssignmentIntoEmpty) {
auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto ptr2 = IntrusivePtr<RefCounted>();
ptr2 = std::move(ptr1);
ASSERT_TRUE(ptr2->RefCountIsOne());
ASSERT_EQ(ptr1.get(), nullptr);
}
TEST(IntrusivePtr, MoveAssignmentAlias) {
auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
auto& ptr_alias = ptr;
ptr = std::move(ptr_alias);
ASSERT_TRUE(ptr->RefCountIsOne());
}
TEST(IntrusivePtr, Reset) {
auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
ptr.reset(new RefCounted(), false);
ASSERT_TRUE(ptr->RefCountIsOne());
}
TEST(IntrusivePtr, ResetIntoEmpty) {
auto ptr = IntrusivePtr<RefCounted>();
ptr.reset(new RefCounted(), false);
ASSERT_TRUE(ptr->RefCountIsOne());
}
TEST(IntrusivePtr, ResetAlias) {
auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
ASSERT_TRUE(ptr->RefCountIsOne());
ptr.reset(ptr.get(), false);
ASSERT_TRUE(ptr->RefCountIsOne());
}
TEST(IntrusivePtr, ResetRefBeforeUnref) {
class Foo : public RefCounted {
public:
explicit Foo(char label, Foo* ptr = nullptr)
: label_(label), ptr_(ptr, false) {}
char label_;
IntrusivePtr<Foo> ptr_;
};
IntrusivePtr<Foo> x(new Foo{'a', new Foo{'b', new Foo{'c'}}}, false);
x->ptr_ = x->ptr_->ptr_;
}
TEST(IntrusivePtr, ResetStealPtrBeforeUnref) {
class Foo : public RefCounted {
public:
explicit Foo(char label, Foo* ptr = nullptr)
: label_(label), ptr_(ptr, false) {}
char label_;
IntrusivePtr<Foo> ptr_;
};
IntrusivePtr<Foo> x(new Foo{'a', new Foo{'b', new Foo{'c'}}}, false);
x->ptr_ = std::move(x->ptr_->ptr_);
}
TEST(IntrusivePtr, Detach) {
auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), false);
ASSERT_TRUE(ptr->RefCountIsOne());
auto raw = ptr.detach();
ASSERT_TRUE(raw->RefCountIsOne());
raw->Unref();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/intrusive_ptr.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/intrusive_ptr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
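// Illustrative sketch (not part of the dataset record above): the boolean
// constructor argument decides whether the refcount is bumped. Passing false
// adopts the reference returned by `new`; passing true adds a second one.
// The function and variable names below are invented.
#include "tensorflow/core/platform/intrusive_ptr.h"
#include "tensorflow/core/platform/refcount.h"

void IntrusivePtrAdoptVsShareSketch() {
  auto* raw = new tensorflow::core::RefCounted();  // refcount == 1
  // Adopt: no extra Ref(), the smart pointer takes over the initial reference.
  tensorflow::core::IntrusivePtr<tensorflow::core::RefCounted> owner(
      raw, /*add_ref=*/false);
  // Share: Ref() is called, so the object stays alive until both `owner` and
  // `sharer` have been destroyed.
  tensorflow::core::IntrusivePtr<tensorflow::core::RefCounted> sharer(
      raw, /*add_ref=*/true);
}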
35fb4466-a507-4ae8-a10f-462ec10def06 | cpp | tensorflow/tensorflow | notification | tensorflow/core/lib/core/notification.h | tensorflow/core/lib/core/notification_test.cc | #ifndef TENSORFLOW_CORE_LIB_CORE_NOTIFICATION_H_
#define TENSORFLOW_CORE_LIB_CORE_NOTIFICATION_H_
#include "tensorflow/core/platform/notification.h"
#endif | #include "tensorflow/core/platform/test.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
TEST(NotificationTest, TestSingleNotification) {
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test", 1);
int counter = 0;
Notification start;
Notification proceed;
thread_pool->Schedule([&start, &proceed, &counter] {
start.Notify();
proceed.WaitForNotification();
++counter;
});
start.WaitForNotification();
EXPECT_EQ(0, counter);
proceed.Notify();
delete thread_pool;
EXPECT_EQ(1, counter);
}
TEST(NotificationTest, TestMultipleThreadsWaitingOnNotification) {
const int num_closures = 4;
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test", num_closures);
mutex lock;
int counter = 0;
Notification n;
for (int i = 0; i < num_closures; ++i) {
thread_pool->Schedule([&n, &lock, &counter] {
n.WaitForNotification();
mutex_lock l(lock);
++counter;
});
}
Env::Default()->SleepForMicroseconds(1 * 1000 * 1000);
EXPECT_EQ(0, counter);
n.Notify();
delete thread_pool;
EXPECT_EQ(4, counter);
}
TEST(NotificationTest, TestWaitWithTimeoutOnNotifiedNotification) {
Notification n;
n.Notify();
EXPECT_TRUE(WaitForNotificationWithTimeout(&n, 1000 * 1000));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/notification.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/notification_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8f2bbdb6-1130-4e57-b10d-d60bc7696f0b | cpp | tensorflow/tensorflow | tstring | tensorflow/core/platform/tstring.h | third_party/xla/third_party/tsl/tsl/platform/tstring_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_TSTRING_H_
#define TENSORFLOW_CORE_PLATFORM_TSTRING_H_
#include "tensorflow/core/platform/cord.h"
#include "tensorflow/core/platform/ctstring.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tsl/platform/tstring.h"
namespace tensorflow {
using tstring = tsl::tstring;
}
#endif | #include "tsl/platform/tstring.h"
#include <memory>
#include <string>
#include "tsl/platform/cord.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/test.h"
using ::tsl::tstring;
static const char kLongString[] =
"abcdefghij"
"klmnopqrst"
"uvwxyz0123"
"456789ABCD"
"EFGHIKLMNO";
const size_t kLongStringLen = sizeof(kLongString) / sizeof(char) - sizeof(char);
TEST(TF_TStringTest, Construction) {
tstring s10;
tstring s11("a\0a", 3);
tstring s12(kLongString);
tstring s13(3, 'b');
tstring s14(absl::string_view("hi"));
tstring s15(std::string("bye"));
EXPECT_EQ("", s10);
EXPECT_TRUE(s10.empty());
EXPECT_EQ(tstring::Type::SMALL, s10.type());
EXPECT_EQ(0, s10.size());
EXPECT_EQ(0, s10.length());
EXPECT_EQ(TF_TString_SmallCapacity, s10.capacity());
EXPECT_EQ(std::string("a\0a", 3), s11);
EXPECT_FALSE(s11.empty());
EXPECT_EQ(3, s11.size());
EXPECT_EQ(3, s11.length());
EXPECT_EQ(kLongString, s12);
EXPECT_EQ(kLongStringLen, s12.size());
EXPECT_EQ(tstring::Type::LARGE, s12.type());
EXPECT_LT(TF_TString_SmallCapacity, s12.capacity());
EXPECT_EQ("bbb", s13);
EXPECT_EQ("hi", s14);
EXPECT_EQ(tstring::Type::SMALL, s14.type());
EXPECT_EQ("bye", s15);
}
TEST(TF_TStringTest, CopyMove) {
tstring s20(kLongString);
tstring s21(s20);
tstring s22;
EXPECT_EQ(s20, s21);
s22 = std::move(s21);
EXPECT_EQ(s20, s22);
EXPECT_EQ("", s21);
EXPECT_EQ(tstring::Type::SMALL, s21.type());
}
TEST(TF_TStringTest, Assignment) {
tstring s30("123456789012345678901234567890");
tstring s31;
tstring s32;
s31 = s30;
EXPECT_EQ(s30, s31);
EXPECT_EQ(tstring::Type::LARGE, s31.type());
EXPECT_EQ(s30.size(), s31.size());
s32 = std::move(s30);
EXPECT_EQ(s31, s32);
EXPECT_EQ("", s30);
EXPECT_EQ(tstring::Type::SMALL, s30.type());
EXPECT_EQ(tstring::Type::LARGE, s32.type());
s32 = tstring::view(kLongString);
EXPECT_EQ(kLongString, s32);
EXPECT_EQ(tstring::Type::VIEW, s32.type());
EXPECT_EQ(kLongStringLen, s32.size());
EXPECT_EQ(0, s32.capacity());
tstring s33(std::move(s32));
EXPECT_EQ(kLongString, s33);
EXPECT_EQ(tstring::Type::VIEW, s33.type());
EXPECT_EQ(kLongStringLen, s33.size());
s32 = std::string(kLongString);
EXPECT_EQ(kLongString, s32);
EXPECT_EQ(tstring::Type::LARGE, s32.type());
EXPECT_EQ(kLongStringLen, s32.size());
s32 = "hello";
EXPECT_EQ("hello", s32);
EXPECT_EQ(tstring::Type::SMALL, s32.type());
EXPECT_EQ(5, s32.size());
s33 = 'a';
EXPECT_EQ("a", s33);
EXPECT_EQ(tstring::Type::SMALL, s33.type());
EXPECT_EQ(1, s33.size());
s32 = absl::string_view(kLongString);
EXPECT_EQ(kLongString, s32);
EXPECT_EQ(tstring::Type::LARGE, s32.type());
EXPECT_EQ(kLongStringLen, s32.size());
s32.resize(TF_TString_SmallCapacity * 2);
EXPECT_EQ(absl::string_view(kLongString, TF_TString_SmallCapacity * 2), s32);
EXPECT_EQ(tstring::Type::LARGE, s32.type());
EXPECT_EQ(TF_TString_SmallCapacity * 2, s32.size());
s32 = tstring::view(kLongString, kLongStringLen);
EXPECT_EQ(kLongString, s32);
EXPECT_EQ(tstring::Type::VIEW, s32.type());
EXPECT_EQ(kLongStringLen, s32.size());
s32.assign("hello1");
EXPECT_EQ("hello1", s32);
s32.assign("hello2", 5);
EXPECT_EQ("hello", s32);
s30.assign_as_view(kLongString);
EXPECT_EQ(tstring::Type::VIEW, s30.type());
s31.assign_as_view(s30);
EXPECT_EQ(tstring::Type::VIEW, s31.type());
EXPECT_EQ(kLongString, s30.c_str());
EXPECT_EQ(kLongString, s31.c_str());
std::string tmp(kLongString);
s32.assign_as_view(tmp);
EXPECT_EQ(tstring::Type::VIEW, s32.type());
EXPECT_STREQ(kLongString, s32.c_str());
s33.assign_as_view(kLongString, 2);
EXPECT_EQ(2, s33.size());
s32.assign_as_view(absl::string_view(kLongString));
EXPECT_EQ(tstring::Type::VIEW, s32.type());
EXPECT_EQ(kLongString, s32.c_str());
#ifdef PLATFORM_GOOGLE
s33 = absl::Cord(kLongString);
EXPECT_EQ(kLongString, s33);
EXPECT_EQ(tstring::Type::LARGE, s33.type());
EXPECT_EQ(kLongStringLen, s33.size());
tstring s34((absl::Cord(kLongString)));
EXPECT_EQ(kLongString, s34);
EXPECT_EQ(tstring::Type::LARGE, s34.type());
EXPECT_EQ(kLongStringLen, s34.size());
#endif
}
TEST(TF_TStringTest, Comparison) {
tstring empty("");
tstring a("a");
tstring aa("aa");
tstring a_("a");
tstring b("b");
const char c[] = "c";
tstring nulla("\0a", 2);
tstring nullb("\0b", 2);
tstring nullaa("\0aa", 3);
EXPECT_TRUE(a < b);
EXPECT_TRUE(a != b);
EXPECT_FALSE(a > b);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a < aa);
EXPECT_TRUE(a != aa);
EXPECT_FALSE(a > aa);
EXPECT_FALSE(a == aa);
EXPECT_TRUE(b > a);
EXPECT_TRUE(b != a);
EXPECT_FALSE(b < a);
EXPECT_FALSE(b == a);
EXPECT_FALSE(a == b);
EXPECT_FALSE(b == c);
EXPECT_TRUE(b != c);
EXPECT_TRUE(empty < a);
EXPECT_TRUE(empty != a);
EXPECT_FALSE(empty > a);
EXPECT_FALSE(empty == a);
EXPECT_TRUE(a > empty);
EXPECT_TRUE(a != empty);
EXPECT_FALSE(a < empty);
EXPECT_FALSE(a == empty);
EXPECT_FALSE(a < a_);
EXPECT_FALSE(a != a_);
EXPECT_FALSE(a > a_);
EXPECT_TRUE(a == a_);
EXPECT_TRUE(nulla < nullaa);
EXPECT_TRUE(nulla != nullaa);
EXPECT_FALSE(nulla > nullaa);
EXPECT_FALSE(nulla == nullaa);
EXPECT_TRUE(nulla < nullb);
EXPECT_TRUE(nullaa > nulla);
EXPECT_TRUE(nullaa != nulla);
EXPECT_FALSE(nullaa < nulla);
EXPECT_FALSE(nullaa == nulla);
}
TEST(TF_TStringTest, Conversion) {
tstring s50(kLongString);
std::string s51(s50);
absl::string_view s52(s50);
EXPECT_EQ(kLongString, s51);
EXPECT_EQ(kLongStringLen, s51.size());
EXPECT_EQ(kLongString, s52);
EXPECT_EQ(kLongStringLen, s52.size());
#ifdef PLATFORM_GOOGLE
absl::AlphaNum s53(s50);
EXPECT_STREQ(kLongString, s53.data());
EXPECT_EQ(kLongStringLen, s53.size());
#endif
}
TEST(TF_TStringTest, Allocation) {
tstring s60;
s60.resize(2);
EXPECT_EQ(std::string("\0\0", 2), s60);
EXPECT_EQ(2, s60.size());
EXPECT_EQ(2, s60.length());
s60.resize(6, 'a');
EXPECT_EQ(std::string("\0\0aaaa", 6), s60);
EXPECT_EQ(6, s60.size());
EXPECT_EQ(6, s60.length());
s60.resize(3, 'b');
EXPECT_EQ(std::string("\0\0a", 3), s60);
EXPECT_EQ(3, s60.size());
EXPECT_EQ(3, s60.length());
s60.clear();
EXPECT_EQ("", s60);
EXPECT_TRUE(s60.empty());
EXPECT_EQ(0, s60.size());
EXPECT_EQ(0, s60.length());
s60.reserve(100);
EXPECT_EQ(111, s60.capacity());
s60.reserve(100);
}
TEST(TF_TStringTest, ElementAccess) {
tstring s70(kLongString);
EXPECT_STREQ(kLongString, s70.data());
EXPECT_EQ(s70.data(), s70.c_str());
for (size_t i = 0; i < s70.size(); i++) {
EXPECT_EQ(kLongString[i], s70.data()[i]);
}
tstring::const_iterator i = s70.begin();
const char* j = kLongString;
for (; *j != '\0'; i++, j++) {
EXPECT_EQ(*j, *i);
}
EXPECT_EQ('\0', *s70.end());
EXPECT_EQ(*i, *s70.end());
EXPECT_EQ(*(i - 1), s70.back());
}
TEST(TF_TStringTest, Modifiers) {
tstring s80("ba");
tstring s81;
tstring s82(kLongString);
s81.append(s80);
EXPECT_EQ("ba", s81);
s81.append(s80);
EXPECT_EQ("baba", s81);
s81.append("\0c", 2);
EXPECT_EQ(std::string("baba\0c", 6), s81);
s81.append("dd");
EXPECT_EQ(std::string("baba\0cdd", 8), s81);
s81.append(3, 'z');
EXPECT_EQ(tstring("baba\0cddzzz", 11), s81);
s81.append(0, 'z');
s81.append("dd", 0);
s81.append("");
s81.append(tstring());
EXPECT_EQ(std::string("baba\0cddzzz", 11), s81);
s81.erase(0, 1);
EXPECT_EQ(std::string("aba\0cddzzz", 10), s81);
s81.erase(4, 6);
EXPECT_EQ(std::string("aba\0", 4), s81);
s81.insert(1, tstring("\0moo\0", 5), 1, 4);
EXPECT_EQ(std::string("amoo\0ba\0", 8), s81);
s81.insert(0, 2, '\0');
s81.insert(s81.size() - 1, 1, 'q');
EXPECT_EQ(std::string("\0\0amoo\0baq\0", 11), s81);
s81.erase(0, s81.size());
EXPECT_EQ(tstring(), s81);
s80.swap(s82);
EXPECT_EQ(kLongString, s80);
EXPECT_EQ("ba", s82);
s82.push_back('\0');
s82.push_back('q');
EXPECT_EQ(std::string("ba\0q", 4), s82);
}
TEST(TF_TStringTest, Friends) {
tstring s90("b");
tstring s91("\0a\0", 3);
tstring s92;
EXPECT_EQ("b", s90 + s92);
EXPECT_EQ("b", s92 + s90);
EXPECT_EQ(std::string("\0a\0", 3), s92 + s91);
EXPECT_EQ(std::string("\0a\0", 3), s91 + s92);
EXPECT_EQ(std::string("b\0a\0", 4), s90 + s91);
EXPECT_EQ(std::string("\0a\0b", 4), s91 + s90);
std::stringstream ss;
ss << s91;
EXPECT_EQ(std::string("\0a\0", 3), ss.str());
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/tstring.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/tstring_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
baaeddd6-ac64-4c73-9a7a-ec433d59b970 | cpp | tensorflow/tensorflow | ctstring | tensorflow/core/platform/ctstring.h | third_party/xla/third_party/tsl/tsl/platform/ctstring_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_CTSTRING_H_
#define TENSORFLOW_CORE_PLATFORM_CTSTRING_H_
#include "tsl/platform/ctstring.h"
#endif | #include "tsl/platform/ctstring.h"
#include <memory>
#include <string>
#include "tsl/platform/ctstring_internal.h"
#include "tsl/platform/test.h"
static const char kLongString[] =
"abcdefghij"
"klmnopqrst"
"uvwxyz0123"
"456789ABCD"
"EFGHIKLMNO";
const size_t kLongStringLen = sizeof(kLongString) / sizeof(char) - sizeof(char);
TEST(TF_CTStringTest, InitAssignMoveDealloc) {
EXPECT_GT(::strlen(kLongString), TF_TString_SmallCapacity);
{
TF_TString s10, s11, s12;
TF_TString_Init(&s10);
TF_TString_Init(&s11);
TF_TString_Init(&s12);
EXPECT_EQ(0, TF_TString_GetSize(&s10));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s10));
EXPECT_STREQ("", TF_TString_GetDataPointer(&s10));
EXPECT_STREQ("", TF_TString_GetMutableDataPointer(&s10));
TF_TString_Assign(&s11, &s10);
EXPECT_EQ(0, TF_TString_GetSize(&s11));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s10));
EXPECT_STREQ("", TF_TString_GetDataPointer(&s11));
EXPECT_STREQ("", TF_TString_GetMutableDataPointer(&s11));
TF_TString_Move(&s12, &s11);
EXPECT_EQ(0, TF_TString_GetSize(&s11));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s10));
EXPECT_STREQ("", TF_TString_GetDataPointer(&s11));
EXPECT_STREQ("", TF_TString_GetMutableDataPointer(&s11));
EXPECT_EQ(0, TF_TString_GetSize(&s12));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s10));
EXPECT_STREQ("", TF_TString_GetDataPointer(&s12));
EXPECT_STREQ("", TF_TString_GetMutableDataPointer(&s12));
TF_TString_Dealloc(&s10);
TF_TString_Dealloc(&s11);
TF_TString_Dealloc(&s12);
}
{
TF_TString s20, s21, s22;
TF_TString_Init(&s20);
TF_TString_Init(&s21);
TF_TString_Init(&s22);
TF_TString_Copy(&s20, "a", 1);
EXPECT_EQ(1, TF_TString_GetSize(&s20));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s20));
EXPECT_STREQ("a", TF_TString_GetDataPointer(&s20));
EXPECT_STREQ("a", TF_TString_GetMutableDataPointer(&s20));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s20));
TF_TString_Assign(&s21, &s20);
EXPECT_EQ(1, TF_TString_GetSize(&s21));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s21));
EXPECT_STREQ("a", TF_TString_GetDataPointer(&s21));
EXPECT_STREQ("a", TF_TString_GetMutableDataPointer(&s21));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s21));
TF_TString_Move(&s22, &s21);
EXPECT_EQ(1, TF_TString_GetSize(&s22));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s22));
EXPECT_STREQ("a", TF_TString_GetDataPointer(&s22));
EXPECT_STREQ("a", TF_TString_GetMutableDataPointer(&s22));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s22));
TF_TString_Dealloc(&s20);
TF_TString_Dealloc(&s21);
TF_TString_Dealloc(&s22);
}
{
TF_TString s30, s31;
TF_TString_Init(&s30);
TF_TString_Init(&s31);
size_t s = TF_TString_SmallCapacity - 1;
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s30));
TF_TString_Copy(&s30, kLongString, s);
EXPECT_STREQ(std::string(kLongString, s).data(),
TF_TString_GetDataPointer(&s30));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s30));
EXPECT_GT(TF_TString_SmallCapacity, TF_TString_GetSize(&s30));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s30));
TF_TString_AppendN(&s30, &kLongString[s++], 1);
EXPECT_STREQ(std::string(kLongString, s).data(),
TF_TString_GetDataPointer(&s30));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s30));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetSize(&s30));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s30));
TF_TString_AppendN(&s30, &kLongString[s++], 1);
EXPECT_STREQ(std::string(kLongString, s).data(),
TF_TString_GetDataPointer(&s30));
EXPECT_STREQ(std::string(kLongString, s).data(),
TF_TString_GetMutableDataPointer(&s30));
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s30));
EXPECT_EQ(s, TF_TString_GetSize(&s30));
EXPECT_LT(TF_TString_SmallCapacity, TF_TString_GetSize(&s30));
EXPECT_LT(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s30));
TF_TString_Move(&s31, &s30);
EXPECT_STREQ("", TF_TString_GetDataPointer(&s30));
EXPECT_STREQ("", TF_TString_GetMutableDataPointer(&s30));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s30));
EXPECT_EQ(0, TF_TString_GetSize(&s30));
EXPECT_STREQ(std::string(kLongString, s).data(),
TF_TString_GetDataPointer(&s31));
EXPECT_STREQ(std::string(kLongString, s).data(),
TF_TString_GetMutableDataPointer(&s31));
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s31));
EXPECT_EQ(s, TF_TString_GetSize(&s31));
EXPECT_LT(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s31));
TF_TString_Dealloc(&s30);
TF_TString_Dealloc(&s31);
}
{
const char kStr[] = "abcdef";
const char kStrLen = sizeof(kStr) / sizeof(char) - sizeof(char);
TF_TString s40, s41;
TF_TString_Init(&s40);
TF_TString_Init(&s41);
TF_TString_Copy(&s40, kLongString, kLongStringLen);
EXPECT_EQ(kLongStringLen, TF_TString_GetSize(&s40));
TF_TString_Assign(&s41, &s40);
EXPECT_STREQ(kLongString, TF_TString_GetDataPointer(&s40));
EXPECT_STREQ(kLongString, TF_TString_GetMutableDataPointer(&s40));
EXPECT_EQ(kLongStringLen, TF_TString_GetSize(&s41));
TF_TString_AppendN(&s40, kLongString, kLongStringLen);
TF_TString_Append(&s40, &s41);
std::string longerString(kLongString);
longerString += kLongString;
longerString += kLongString;
EXPECT_STREQ(longerString.data(), TF_TString_GetDataPointer(&s40));
EXPECT_STREQ(longerString.data(), TF_TString_GetMutableDataPointer(&s40));
EXPECT_EQ(longerString.size(), TF_TString_GetSize(&s40));
TF_TString_AssignView(&s40, kStr, kStrLen);
EXPECT_EQ(TF_TSTR_VIEW, TF_TString_GetType(&s40));
EXPECT_EQ(kStr, TF_TString_GetDataPointer(&s40));
EXPECT_EQ(6, TF_TString_GetSize(&s40));
EXPECT_EQ(0, TF_TString_GetCapacity(&s40));
EXPECT_NE(kStr, TF_TString_GetMutableDataPointer(&s40));
EXPECT_STREQ(kStr, TF_TString_GetMutableDataPointer(&s40));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s40));
EXPECT_EQ(6, TF_TString_GetSize(&s40));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s40));
TF_TString_Dealloc(&s40);
TF_TString_Dealloc(&s41);
}
{
TF_TString s50;
TF_TString_Init(&s50);
TF_TString_Copy(&s50, "a", 1);
EXPECT_STREQ("a", TF_TString_GetDataPointer(&s50));
EXPECT_STREQ("a", TF_TString_GetMutableDataPointer(&s50));
EXPECT_EQ(1, TF_TString_GetSize(&s50));
TF_TString_Copy(&s50, kLongString, kLongStringLen);
EXPECT_STREQ(kLongString, TF_TString_GetDataPointer(&s50));
EXPECT_STREQ(kLongString, TF_TString_GetMutableDataPointer(&s50));
EXPECT_EQ(kLongStringLen, TF_TString_GetSize(&s50));
size_t cap1 = TF_TString_GetCapacity(&s50);
TF_TString_Copy(&s50, kLongString, TF_TString_SmallCapacity + 1);
size_t cap2 = TF_TString_GetCapacity(&s50);
EXPECT_STREQ(std::string(kLongString, TF_TString_SmallCapacity + 1).data(),
TF_TString_GetMutableDataPointer(&s50));
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s50));
EXPECT_GT(cap1, cap2);
TF_TString_Copy(&s50, "c", 1);
EXPECT_STREQ("c", TF_TString_GetDataPointer(&s50));
EXPECT_STREQ("c", TF_TString_GetMutableDataPointer(&s50));
EXPECT_EQ(1, TF_TString_GetSize(&s50));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s50));
TF_TString_Dealloc(&s50);
}
}
TEST(TF_CTStringTest, ResizeReserve) {
{
TF_TString s60;
TF_TString_Init(&s60);
TF_TString_Resize(&s60, 2, 'a');
EXPECT_EQ(0, ::memcmp("aa", TF_TString_GetDataPointer(&s60), 2));
TF_TString_Resize(&s60, 4, '\0');
EXPECT_EQ(0, ::memcmp("aa\0\0", TF_TString_GetDataPointer(&s60), 4));
TF_TString_Resize(&s60, 6, 'b');
EXPECT_EQ(0, ::memcmp("aa\0\0bb", TF_TString_GetDataPointer(&s60), 6));
TF_TString_Resize(&s60, 2, 'c');
EXPECT_EQ(0, ::memcmp("aa", TF_TString_GetDataPointer(&s60), 2));
TF_TString_Dealloc(&s60);
}
{
TF_TString s70;
TF_TString_Init(&s70);
TF_TString_Reserve(&s70, TF_TString_SmallCapacity - 1);
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s70));
EXPECT_EQ(0, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s70));
TF_TString_Reserve(&s70, TF_TString_SmallCapacity);
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s70));
EXPECT_EQ(0, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s70));
TF_TString_Copy(&s70, "hello", 5);
EXPECT_EQ(5, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s70));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s70));
TF_TString_Reserve(&s70, 100);
EXPECT_EQ(111, TF_TString_GetCapacity(&s70));
EXPECT_EQ(5, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s70));
TF_TString_AssignView(&s70, kLongString, kLongStringLen);
TF_TString_Reserve(&s70, 10);
EXPECT_EQ(TF_TSTR_VIEW, TF_TString_GetType(&s70));
EXPECT_EQ(0, TF_TString_GetCapacity(&s70));
TF_TString_Reserve(&s70, 100);
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s70));
EXPECT_EQ(111, TF_TString_GetCapacity(&s70));
TF_TString_Reserve(&s70, 200);
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s70));
EXPECT_EQ(207, TF_TString_GetCapacity(&s70));
TF_TString_Dealloc(&s70);
}
{
TF_TString s70;
TF_TString_Init(&s70);
TF_TString_ReserveAmortized(&s70, TF_TString_SmallCapacity - 1);
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s70));
EXPECT_EQ(0, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s70));
TF_TString_ReserveAmortized(&s70, TF_TString_SmallCapacity);
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s70));
EXPECT_EQ(0, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s70));
TF_TString_Copy(&s70, "hello", 5);
EXPECT_EQ(5, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s70));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s70));
TF_TString_ReserveAmortized(&s70, 100);
EXPECT_EQ(111, TF_TString_GetCapacity(&s70));
EXPECT_EQ(5, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s70));
TF_TString_AssignView(&s70, kLongString, kLongStringLen);
TF_TString_ReserveAmortized(&s70, 10);
EXPECT_EQ(TF_TSTR_VIEW, TF_TString_GetType(&s70));
EXPECT_EQ(0, TF_TString_GetCapacity(&s70));
TF_TString_ReserveAmortized(&s70, 100);
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s70));
EXPECT_EQ(111, TF_TString_GetCapacity(&s70));
TF_TString_ReserveAmortized(&s70, 200);
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s70));
EXPECT_EQ(223, TF_TString_GetCapacity(&s70));
TF_TString_Dealloc(&s70);
}
}
TEST(TF_CTStringTest, OffsetType) {
{
uint8_t str[] = "test";
constexpr size_t str_size = sizeof(str) / sizeof(str[0]);
uint8_t buf[sizeof(TF_TString) + str_size];
memcpy(buf + sizeof(TF_TString), str, str_size);
TF_TString *offsets = (TF_TString *)buf;
TF_TString_Init(offsets);
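    // Hand-assemble an OFFSET-type string: the size field packs the byte
    // length above the 2-bit type tag, the offset field points just past the
    // TF_TString header to the bytes copied into buf, and count is set to one.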
offsets[0].u.offset.size = TF_le32toh(str_size << 2 | TF_TSTR_OFFSET);
offsets[0].u.offset.offset = TF_le32toh(sizeof(TF_TString));
offsets[0].u.offset.count = TF_le32toh(1);
EXPECT_EQ(str_size, TF_TString_GetSize(offsets));
EXPECT_EQ(TF_TSTR_OFFSET, TF_TString_GetType(offsets));
EXPECT_EQ(0, ::memcmp(str, TF_TString_GetDataPointer(offsets), str_size));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/ctstring.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/ctstring_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
23ee1f36-b69d-4cd2-82cb-48f9dc42b1dc | cpp | tensorflow/tensorflow | numa | tensorflow/core/platform/numa.h | third_party/xla/third_party/tsl/tsl/platform/numa_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_NUMA_H_
#define TENSORFLOW_CORE_PLATFORM_NUMA_H_
#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/numa.h"
namespace tensorflow {
namespace port {
using tsl::port::kNUMANoAffinity;
using tsl::port::NUMAEnabled;
using tsl::port::NUMAFree;
using tsl::port::NUMAGetMemAffinity;
using tsl::port::NUMAGetThreadNodeAffinity;
using tsl::port::NUMAMalloc;
using tsl::port::NUMANumNodes;
using tsl::port::NUMASetThreadNodeAffinity;
}
}
#endif | #include "tsl/platform/numa.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace internal {
TEST(Numa, NumNodes) {
if (port::NUMAEnabled()) {
EXPECT_GE(port::NUMANumNodes(), 1);
}
}
TEST(Numa, Malloc) {
if (port::NUMAEnabled()) {
int num_nodes = port::NUMANumNodes();
for (int request_node = 0; request_node < num_nodes; ++request_node) {
void* ptr = port::NUMAMalloc(request_node, 8, 0);
EXPECT_NE(ptr, nullptr);
*(reinterpret_cast<int*>(ptr)) = 0;
int affinity_node = port::NUMAGetMemAffinity(ptr);
EXPECT_EQ(affinity_node, request_node);
port::NUMAFree(ptr, 8);
}
}
}
TEST(Numa, SetNodeAffinity) {
EXPECT_EQ(-1, port::NUMAGetThreadNodeAffinity());
if (port::NUMAEnabled()) {
int num_nodes = port::NUMANumNodes();
for (int request_node = 0; request_node < num_nodes; ++request_node) {
port::NUMASetThreadNodeAffinity(request_node);
int affinity_node = port::NUMAGetThreadNodeAffinity();
EXPECT_EQ(affinity_node, request_node);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/numa.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/numa_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b76f2da2-2c1b-4341-879a-45c8de64721a | cpp | tensorflow/tensorflow | snappy | tensorflow/core/platform/snappy.h | third_party/xla/xla/tsl/lib/io/snappy/snappy_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_SNAPPY_H_
#define TENSORFLOW_CORE_PLATFORM_SNAPPY_H_
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/snappy.h"
#if !defined(PLATFORM_WINDOWS)
#include <sys/uio.h>
#else
namespace tensorflow {
using tsl::iovec;
}
#endif
namespace tensorflow {
namespace port {
using tsl::port::Snappy_Compress;
using tsl::port::Snappy_CompressFromIOVec;
using tsl::port::Snappy_GetUncompressedLength;
using tsl::port::Snappy_Uncompress;
using tsl::port::Snappy_UncompressToIOVec;
}
}
#endif | #include <memory>
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/io/inputbuffer.h"
#include "xla/tsl/lib/io/random_inputstream.h"
#include "xla/tsl/lib/io/snappy/snappy_inputbuffer.h"
#include "xla/tsl/lib/io/snappy/snappy_inputstream.h"
#include "xla/tsl/lib/io/snappy/snappy_outputbuffer.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace tsl {
static void CheckPrefixSuffix(absl::string_view str, const string& prefix,
const string& suffix) {
CHECK_GE(str.size(), prefix.size());
CHECK_GE(str.size(), suffix.size());
CHECK_EQ(str.substr(0, prefix.length()), prefix);
CHECK_EQ(str.substr(str.length() - suffix.length()), suffix);
}
static string GetRecord() {
static const string lorem_ipsum =
"Lorem ipsum dolor sit amet, consectetur adipiscing elit."
" Fusce vehicula tincidunt libero sit amet ultrices. Vestibulum non "
"felis augue. Duis vitae augue id lectus lacinia congue et ut purus. "
"Donec auctor, nisl at dapibus volutpat, diam ante lacinia dolor, vel"
"dignissim lacus nisi sed purus. Duis fringilla nunc ac lacus sagittis"
" efficitur. Praesent tincidunt egestas eros, eu vehicula urna ultrices"
" et. Aliquam erat volutpat. Maecenas vehicula risus consequat risus"
" dictum, luctus tincidunt nibh imperdiet. Aenean bibendum ac erat"
" cursus scelerisque. Cras lacinia in enim dapibus iaculis. Nunc porta"
" felis lectus, ac tincidunt massa pharetra quis. Fusce feugiat dolor"
" vel ligula rutrum egestas. Donec vulputate quam eros, et commodo"
" purus lobortis sed.";
return lorem_ipsum;
}
static string GenTestString(int copies = 1) {
string result = "";
for (int i = 0; i < copies; i++) {
result += GetRecord();
}
return result;
}
absl::Status TestMultipleWritesWriteFile(size_t compress_input_buf_size,
size_t compress_output_buf_size,
int num_writes, bool with_flush,
int num_copies,
bool corrupt_compressed_file,
string& fname, string& data,
string& expected_result) {
Env* env = Env::Default();
fname = testing::TmpDir() + "/snappy_buffers_test";
data = GenTestString(num_copies);
std::unique_ptr<WritableFile> file_writer;
TF_RETURN_IF_ERROR(env->NewWritableFile(fname, &file_writer));
io::SnappyOutputBuffer out(file_writer.get(), compress_input_buf_size,
compress_output_buf_size);
for (int i = 0; i < num_writes; i++) {
TF_RETURN_IF_ERROR(out.Write(absl::string_view(data)));
if (with_flush) {
TF_RETURN_IF_ERROR(out.Flush());
}
strings::StrAppend(&expected_result, data);
}
TF_RETURN_IF_ERROR(out.Flush());
TF_RETURN_IF_ERROR(file_writer->Flush());
TF_RETURN_IF_ERROR(file_writer->Close());
if (corrupt_compressed_file) {
string corrupt_fname = testing::TmpDir() + "/snappy_buffers_test_corrupt";
std::unique_ptr<WritableFile> corrupt_file_writer;
TF_RETURN_IF_ERROR(
env->NewWritableFile(corrupt_fname, &corrupt_file_writer));
std::unique_ptr<RandomAccessFile> file_reader;
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(fname, &file_reader));
absl::string_view data;
size_t file_pos = 0;
size_t bytes_to_read = 256;
char* scratch = new char[bytes_to_read];
char* buffer = new char[bytes_to_read];
size_t buffer_size = 0;
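    // Copy the compressed file one chunk behind: each iteration appends the
    // previous chunk, so the final chunk can be written one byte short below,
    // corrupting the last compressed block.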
while ((file_reader->Read(file_pos, bytes_to_read, &data, scratch)).ok()) {
file_pos += data.size();
TF_CHECK_OK(
corrupt_file_writer->Append(absl::string_view(buffer, buffer_size)));
memcpy(buffer, data.data(), data.size());
buffer_size = data.size();
}
TF_CHECK_OK(corrupt_file_writer->Append(
absl::string_view(buffer, buffer_size - 1)));
TF_CHECK_OK(corrupt_file_writer->Flush());
TF_CHECK_OK(corrupt_file_writer->Close());
delete[] scratch;
delete[] buffer;
fname = corrupt_fname;
}
return absl::OkStatus();
}
absl::Status TestMultipleWrites(size_t compress_input_buf_size,
size_t compress_output_buf_size,
size_t uncompress_input_buf_size,
size_t uncompress_output_buf_size,
int num_writes = 1, bool with_flush = false,
int num_copies = 1,
bool corrupt_compressed_file = false) {
Env* env = Env::Default();
string expected_result;
string fname;
string data;
TF_RETURN_IF_ERROR(TestMultipleWritesWriteFile(
compress_input_buf_size, compress_output_buf_size, num_writes, with_flush,
num_copies, corrupt_compressed_file, fname, data, expected_result));
std::unique_ptr<RandomAccessFile> file_reader;
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(fname, &file_reader));
io::SnappyInputBuffer in(file_reader.get(), uncompress_input_buf_size,
uncompress_output_buf_size);
for (int attempt = 0; attempt < 2; ++attempt) {
string actual_result;
for (int i = 0; i < num_writes; i++) {
tstring decompressed_output;
TF_RETURN_IF_ERROR(in.ReadNBytes(data.size(), &decompressed_output));
strings::StrAppend(&actual_result, decompressed_output);
}
if (actual_result != expected_result) {
return errors::DataLoss("Actual and expected results don't match.");
}
TF_RETURN_IF_ERROR(in.Reset());
}
return absl::OkStatus();
}
absl::Status TestMultipleWritesInputStream(
size_t compress_input_buf_size, size_t compress_output_buf_size,
size_t uncompress_input_buf_size, size_t uncompress_output_buf_size,
int num_writes = 1, bool with_flush = false, int num_copies = 1,
bool corrupt_compressed_file = false) {
Env* env = Env::Default();
string expected_result;
string fname;
string data;
TF_RETURN_IF_ERROR(TestMultipleWritesWriteFile(
compress_input_buf_size, compress_output_buf_size, num_writes, with_flush,
num_copies, corrupt_compressed_file, fname, data, expected_result));
std::unique_ptr<RandomAccessFile> file_reader;
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(fname, &file_reader));
io::RandomAccessInputStream random_input_stream(file_reader.get(), false);
io::SnappyInputStream snappy_input_stream(&random_input_stream,
uncompress_output_buf_size);
for (int attempt = 0; attempt < 2; ++attempt) {
string actual_result;
for (int i = 0; i < num_writes; ++i) {
tstring decompressed_output;
TF_RETURN_IF_ERROR(
snappy_input_stream.ReadNBytes(data.size(), &decompressed_output));
strings::StrAppend(&actual_result, decompressed_output);
}
if (actual_result != expected_result) {
return errors::DataLoss("Actual and expected results don't match.");
}
TF_RETURN_IF_ERROR(snappy_input_stream.Reset());
}
return absl::OkStatus();
}
void TestTellWriteFile(size_t compress_input_buf_size,
size_t compress_output_buf_size,
size_t uncompress_input_buf_size,
size_t uncompress_output_buf_size, int num_copies,
string& fname, string& data) {
Env* env = Env::Default();
fname = testing::TmpDir() + "/snappy_buffers_test";
data = GenTestString(num_copies);
std::unique_ptr<WritableFile> file_writer;
TF_CHECK_OK(env->NewWritableFile(fname, &file_writer));
io::SnappyOutputBuffer out(file_writer.get(), compress_input_buf_size,
compress_output_buf_size);
TF_CHECK_OK(out.Write(absl::string_view(data)));
TF_CHECK_OK(out.Flush());
TF_CHECK_OK(file_writer->Flush());
TF_CHECK_OK(file_writer->Close());
}
void TestTell(size_t compress_input_buf_size, size_t compress_output_buf_size,
size_t uncompress_input_buf_size,
size_t uncompress_output_buf_size, int num_copies = 1) {
Env* env = Env::Default();
string data;
string fname;
TestTellWriteFile(compress_input_buf_size, compress_output_buf_size,
uncompress_input_buf_size, uncompress_output_buf_size,
num_copies, fname, data);
tstring first_half(string(data, 0, data.size() / 2));
tstring bytes_read;
std::unique_ptr<RandomAccessFile> file_reader;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file_reader));
io::SnappyInputBuffer in(file_reader.get(), uncompress_input_buf_size,
uncompress_output_buf_size);
TF_CHECK_OK(in.ReadNBytes(first_half.size(), &bytes_read));
EXPECT_EQ(in.Tell(), first_half.size());
EXPECT_EQ(bytes_read, first_half);
tstring second_half;
TF_CHECK_OK(in.ReadNBytes(data.size() - first_half.size(), &second_half));
EXPECT_EQ(in.Tell(), data.size());
bytes_read.append(second_half);
EXPECT_EQ(bytes_read, data);
}
void TestTellInputStream(size_t compress_input_buf_size,
size_t compress_output_buf_size,
size_t uncompress_input_buf_size,
size_t uncompress_output_buf_size,
int num_copies = 1) {
Env* env = Env::Default();
string data;
string fname;
TestTellWriteFile(compress_input_buf_size, compress_output_buf_size,
uncompress_input_buf_size, uncompress_output_buf_size,
num_copies, fname, data);
tstring first_half(string(data, 0, data.size() / 2));
tstring bytes_read;
std::unique_ptr<RandomAccessFile> file_reader;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file_reader));
io::RandomAccessInputStream random_input_stream(file_reader.get(), false);
io::SnappyInputStream in(&random_input_stream, uncompress_output_buf_size);
TF_CHECK_OK(in.ReadNBytes(first_half.size(), &bytes_read));
EXPECT_EQ(in.Tell(), first_half.size());
EXPECT_EQ(bytes_read, first_half);
tstring second_half;
TF_CHECK_OK(in.ReadNBytes(data.size() - first_half.size(), &second_half));
EXPECT_EQ(in.Tell(), data.size());
bytes_read.append(second_half);
EXPECT_EQ(bytes_read, data);
}
static bool SnappyCompressionSupported() {
string out;
absl::string_view in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
return port::Snappy_Compress(in.data(), in.size(), &out);
}
TEST(SnappyBuffers, MultipleWritesWithoutFlush) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "Snappy disabled. Skipping test\n");
return;
}
TF_CHECK_OK(TestMultipleWrites(10000, 10000, 10000, 10000, 2));
TF_CHECK_OK(TestMultipleWritesInputStream(10000, 10000, 10000, 10000, 2));
}
TEST(SnappyBuffers, MultipleWriteCallsWithFlush) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
TF_CHECK_OK(TestMultipleWrites(10000, 10000, 10000, 10000, 2, true));
TF_CHECK_OK(
TestMultipleWritesInputStream(10000, 10000, 10000, 10000, 2, true));
}
TEST(SnappyBuffers, SmallUncompressInputBuffer) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
absl::Status status = TestMultipleWrites(10000, 10000, 10, 10000, 2, true);
CHECK_EQ(status.code(), error::Code::RESOURCE_EXHAUSTED);
CheckPrefixSuffix(
status.message(),
"Input buffer(size: 10 bytes) too small. Should be larger than ",
" bytes.");
}
TEST(SnappyBuffers, SmallUncompressInputStream) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
CHECK_EQ(TestMultipleWritesInputStream(10000, 10000, 10000, 10, 2, true),
errors::ResourceExhausted(
"Output buffer(size: 10 bytes) too small. ",
"Should be larger than ", GetRecord().size(), " bytes."));
}
TEST(SnappyBuffers, CorruptBlock) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
absl::Status status =
TestMultipleWrites(10000, 10000, 700, 10000, 2, true, 1, true);
CHECK_EQ(status.code(), error::Code::DATA_LOSS);
CheckPrefixSuffix(status.message(), "Failed to read ",
" bytes from file. Possible data corruption.");
}
TEST(SnappyBuffers, CorruptBlockInputStream) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
absl::Status status =
TestMultipleWritesInputStream(10000, 10000, 700, 10000, 2, true, 1, true);
CHECK_EQ(status.code(), error::Code::DATA_LOSS);
CheckPrefixSuffix(status.message(), "Failed to read ",
" bytes from file. Possible data corruption.");
}
TEST(SnappyBuffers, CorruptBlockLargeInputBuffer) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
CHECK_EQ(TestMultipleWrites(10000, 10000, 2000, 10000, 2, true, 1, true),
errors::OutOfRange("EOF reached"));
}
TEST(SnappyBuffers, CorruptBlockLargeInputStream) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
absl::Status status = TestMultipleWritesInputStream(10000, 10000, 2000, 10000,
2, true, 1, true);
CHECK_EQ(status.code(), error::Code::DATA_LOSS);
CheckPrefixSuffix(status.message(), "Failed to read ",
" bytes from file. Possible data corruption.");
}
TEST(SnappyBuffers, Tell) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
TestTell(10000, 10000, 2000, 10000, 2);
}
TEST(SnappyBuffers, TellInputStream) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
TestTellInputStream(10000, 10000, 2000, 10000, 2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/snappy.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/snappy/snappy_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6cef358-abd6-4c20-b354-33736b80705d | cpp | tensorflow/tensorflow | retrying_file_system | tensorflow/core/platform/retrying_file_system.h | third_party/xla/third_party/tsl/tsl/platform/retrying_file_system_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_RETRYING_FILE_SYSTEM_H_
#define TENSORFLOW_CORE_PLATFORM_RETRYING_FILE_SYSTEM_H_
#include <functional>
#include <string>
#include <vector>
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/retrying_utils.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/retrying_file_system.h"
namespace tensorflow {
using tsl::RetryingFileSystem;
}
#endif | #include "tsl/platform/retrying_file_system.h"
#include <fstream>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
typedef std::vector<std::tuple<string, absl::Status>> ExpectedCalls;
ExpectedCalls CreateRetriableErrors(const string& method, int n) {
ExpectedCalls expected_calls;
expected_calls.reserve(n);
for (int i = 0; i < n; i++) {
expected_calls.emplace_back(std::make_tuple(
method, errors::Unavailable(strings::StrCat("Retriable error #", i))));
}
return expected_calls;
}
class MockCallSequence {
public:
explicit MockCallSequence(const ExpectedCalls& calls) : calls_(calls) {}
~MockCallSequence() {
EXPECT_TRUE(calls_.empty())
<< "Not all expected calls have been made, "
<< "the next expected call: " << std::get<0>(calls_.front());
}
absl::Status ConsumeNextCall(const string& method) {
EXPECT_FALSE(calls_.empty()) << "No more calls were expected.";
auto call = calls_.front();
calls_.erase(calls_.begin());
EXPECT_EQ(std::get<0>(call), method) << "Unexpected method called.";
return std::get<1>(call);
}
private:
ExpectedCalls calls_;
};
class MockRandomAccessFile : public RandomAccessFile {
public:
explicit MockRandomAccessFile(const ExpectedCalls& calls) : calls_(calls) {}
absl::Status Name(absl::string_view* result) const override {
return calls_.ConsumeNextCall("Name");
}
absl::Status Read(uint64 offset, size_t n, absl::string_view* result,
char* scratch) const override {
return calls_.ConsumeNextCall("Read");
}
private:
mutable MockCallSequence calls_;
};
class MockWritableFile : public WritableFile {
public:
explicit MockWritableFile(const ExpectedCalls& calls) : calls_(calls) {}
absl::Status Append(absl::string_view data) override {
return calls_.ConsumeNextCall("Append");
}
absl::Status Close() override { return calls_.ConsumeNextCall("Close"); }
absl::Status Flush() override { return calls_.ConsumeNextCall("Flush"); }
absl::Status Name(absl::string_view* result) const override {
return calls_.ConsumeNextCall("Name");
}
absl::Status Sync() override { return calls_.ConsumeNextCall("Sync"); }
absl::Status Tell(int64_t* position) override {
return calls_.ConsumeNextCall("Tell");
}
private:
mutable MockCallSequence calls_;
};
class MockFileSystem : public FileSystem {
public:
explicit MockFileSystem(const ExpectedCalls& calls, bool* flushed = nullptr)
: calls_(calls), flushed_(flushed) {}
TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT;
absl::Status NewRandomAccessFile(
const string& fname, TransactionToken* token,
std::unique_ptr<RandomAccessFile>* result) override {
*result = std::move(random_access_file_to_return);
return calls_.ConsumeNextCall("NewRandomAccessFile");
}
absl::Status NewWritableFile(const string& fname, TransactionToken* token,
std::unique_ptr<WritableFile>* result) override {
*result = std::move(writable_file_to_return);
return calls_.ConsumeNextCall("NewWritableFile");
}
absl::Status NewAppendableFile(
const string& fname, TransactionToken* token,
std::unique_ptr<WritableFile>* result) override {
*result = std::move(writable_file_to_return);
return calls_.ConsumeNextCall("NewAppendableFile");
}
absl::Status NewReadOnlyMemoryRegionFromFile(
const string& fname, TransactionToken* token,
std::unique_ptr<ReadOnlyMemoryRegion>* result) override {
return calls_.ConsumeNextCall("NewReadOnlyMemoryRegionFromFile");
}
absl::Status FileExists(const string& fname,
TransactionToken* token) override {
return calls_.ConsumeNextCall("FileExists");
}
absl::Status GetChildren(const string& dir, TransactionToken* token,
std::vector<string>* result) override {
return calls_.ConsumeNextCall("GetChildren");
}
absl::Status GetMatchingPaths(const string& dir, TransactionToken* token,
std::vector<string>* result) override {
return calls_.ConsumeNextCall("GetMatchingPaths");
}
absl::Status Stat(const string& fname, TransactionToken* token,
FileStatistics* stat) override {
return calls_.ConsumeNextCall("Stat");
}
absl::Status DeleteFile(const string& fname,
TransactionToken* token) override {
return calls_.ConsumeNextCall("DeleteFile");
}
absl::Status CreateDir(const string& dirname,
TransactionToken* token) override {
return calls_.ConsumeNextCall("CreateDir");
}
absl::Status DeleteDir(const string& dirname,
TransactionToken* token) override {
return calls_.ConsumeNextCall("DeleteDir");
}
absl::Status GetFileSize(const string& fname, TransactionToken* token,
uint64* file_size) override {
return calls_.ConsumeNextCall("GetFileSize");
}
absl::Status RenameFile(const string& src, const string& target,
TransactionToken* token) override {
return calls_.ConsumeNextCall("RenameFile");
}
absl::Status IsDirectory(const string& dirname,
TransactionToken* token) override {
return calls_.ConsumeNextCall("IsDirectory");
}
absl::Status DeleteRecursively(const string& dirname, TransactionToken* token,
int64_t* undeleted_files,
int64_t* undeleted_dirs) override {
return calls_.ConsumeNextCall("DeleteRecursively");
}
void FlushCaches(TransactionToken* token) override {
if (flushed_) {
*flushed_ = true;
}
}
std::unique_ptr<WritableFile> writable_file_to_return;
std::unique_ptr<RandomAccessFile> random_access_file_to_return;
private:
MockCallSequence calls_;
bool* flushed_ = nullptr;
};
TEST(RetryingFileSystemTest, NewRandomAccessFile_ImmediateSuccess) {
ExpectedCalls expected_file_calls(
{std::make_tuple("Name", absl::OkStatus()),
std::make_tuple("Read", absl::OkStatus())});
std::unique_ptr<RandomAccessFile> base_file(
new MockRandomAccessFile(expected_file_calls));
ExpectedCalls expected_fs_calls(
{std::make_tuple("NewRandomAccessFile", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->random_access_file_to_return = std::move(base_file);
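  // A zero initial retry delay keeps retries immediate so the test never
  // sleeps between attempts.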
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::unique_ptr<RandomAccessFile> random_access_file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("filename.txt", nullptr, &random_access_file));
absl::string_view result;
TF_EXPECT_OK(random_access_file->Name(&result));
EXPECT_EQ(result, "");
char scratch[10];
TF_EXPECT_OK(random_access_file->Read(0, 10, &result, scratch));
}
TEST(RetryingFileSystemTest, NewRandomAccessFile_SuccessWith3rdTry) {
ExpectedCalls expected_file_calls(
{std::make_tuple("Read", errors::Unavailable("Something is wrong")),
std::make_tuple("Read", errors::Unavailable("Wrong again")),
std::make_tuple("Read", absl::OkStatus())});
std::unique_ptr<RandomAccessFile> base_file(
new MockRandomAccessFile(expected_file_calls));
ExpectedCalls expected_fs_calls(
{std::make_tuple("NewRandomAccessFile", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->random_access_file_to_return = std::move(base_file);
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::unique_ptr<RandomAccessFile> random_access_file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("filename.txt", nullptr, &random_access_file));
absl::string_view result;
char scratch[10];
TF_EXPECT_OK(random_access_file->Read(0, 10, &result, scratch));
}
TEST(RetryingFileSystemTest, NewRandomAccessFile_AllRetriesFailed) {
ExpectedCalls expected_file_calls = CreateRetriableErrors("Read", 11);
std::unique_ptr<RandomAccessFile> base_file(
new MockRandomAccessFile(expected_file_calls));
ExpectedCalls expected_fs_calls(
{std::make_tuple("NewRandomAccessFile", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->random_access_file_to_return = std::move(base_file);
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::unique_ptr<RandomAccessFile> random_access_file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("filename.txt", nullptr, &random_access_file));
absl::string_view result;
char scratch[10];
const auto& status = random_access_file->Read(0, 10, &result, scratch);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, NewRandomAccessFile_NoRetriesForSomeErrors) {
ExpectedCalls expected_file_calls({
std::make_tuple("Read",
errors::FailedPrecondition("Failed precondition")),
});
std::unique_ptr<RandomAccessFile> base_file(
new MockRandomAccessFile(expected_file_calls));
ExpectedCalls expected_fs_calls(
{std::make_tuple("NewRandomAccessFile", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->random_access_file_to_return = std::move(base_file);
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::unique_ptr<RandomAccessFile> random_access_file;
TF_EXPECT_OK(
fs.NewRandomAccessFile("filename.txt", nullptr, &random_access_file));
absl::string_view result;
char scratch[10];
EXPECT_EQ("Failed precondition",
random_access_file->Read(0, 10, &result, scratch).message());
}
TEST(RetryingFileSystemTest, NewWritableFile_ImmediateSuccess) {
ExpectedCalls expected_file_calls(
{std::make_tuple("Name", absl::OkStatus()),
std::make_tuple("Sync", absl::OkStatus()),
std::make_tuple("Close", absl::OkStatus())});
std::unique_ptr<WritableFile> base_file(
new MockWritableFile(expected_file_calls));
ExpectedCalls expected_fs_calls(
{std::make_tuple("NewWritableFile", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->writable_file_to_return = std::move(base_file);
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::unique_ptr<WritableFile> writable_file;
TF_EXPECT_OK(fs.NewWritableFile("filename.txt", nullptr, &writable_file));
absl::string_view result;
TF_EXPECT_OK(writable_file->Name(&result));
EXPECT_EQ(result, "");
TF_EXPECT_OK(writable_file->Sync());
}
TEST(RetryingFileSystemTest, NewWritableFile_SuccessWith3rdTry) {
ExpectedCalls expected_file_calls(
{std::make_tuple("Sync", errors::Unavailable("Something is wrong")),
std::make_tuple("Sync", errors::Unavailable("Something is wrong again")),
std::make_tuple("Sync", absl::OkStatus()),
std::make_tuple("Close", absl::OkStatus())});
std::unique_ptr<WritableFile> base_file(
new MockWritableFile(expected_file_calls));
ExpectedCalls expected_fs_calls(
{std::make_tuple("NewWritableFile", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->writable_file_to_return = std::move(base_file);
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::unique_ptr<WritableFile> writable_file;
TF_EXPECT_OK(fs.NewWritableFile("filename.txt", nullptr, &writable_file));
TF_EXPECT_OK(writable_file->Sync());
}
TEST(RetryingFileSystemTest, NewWritableFile_SuccessWith3rdTry_ViaDestructor) {
ExpectedCalls expected_file_calls(
{std::make_tuple("Close", errors::Unavailable("Something is wrong")),
std::make_tuple("Close",
errors::Unavailable("Something is wrong again")),
std::make_tuple("Close", absl::OkStatus())});
std::unique_ptr<WritableFile> base_file(
new MockWritableFile(expected_file_calls));
ExpectedCalls expected_fs_calls(
{std::make_tuple("NewWritableFile", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->writable_file_to_return = std::move(base_file);
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::unique_ptr<WritableFile> writable_file;
TF_EXPECT_OK(fs.NewWritableFile("filename.txt", nullptr, &writable_file));
writable_file.reset();
}
TEST(RetryingFileSystemTest, NewAppendableFile_SuccessWith3rdTry) {
ExpectedCalls expected_file_calls(
{std::make_tuple("Sync", errors::Unavailable("Something is wrong")),
std::make_tuple("Sync", errors::Unavailable("Something is wrong again")),
std::make_tuple("Sync", absl::OkStatus()),
std::make_tuple("Close", absl::OkStatus())});
std::unique_ptr<WritableFile> base_file(
new MockWritableFile(expected_file_calls));
ExpectedCalls expected_fs_calls(
{std::make_tuple("NewAppendableFile", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->writable_file_to_return = std::move(base_file);
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::unique_ptr<WritableFile> writable_file;
TF_EXPECT_OK(fs.NewAppendableFile("filename.txt", nullptr, &writable_file));
TF_EXPECT_OK(writable_file->Sync());
}
TEST(RetryingFileSystemTest, NewWritableFile_AllRetriesFailed) {
ExpectedCalls expected_file_calls = CreateRetriableErrors("Sync", 11);
expected_file_calls.emplace_back(std::make_tuple("Close", absl::OkStatus()));
std::unique_ptr<WritableFile> base_file(
new MockWritableFile(expected_file_calls));
ExpectedCalls expected_fs_calls(
{std::make_tuple("NewWritableFile", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
base_fs->writable_file_to_return = std::move(base_file);
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::unique_ptr<WritableFile> writable_file;
TF_EXPECT_OK(fs.NewWritableFile("filename.txt", nullptr, &writable_file));
const auto& status = writable_file->Sync();
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest,
NewReadOnlyMemoryRegionFromFile_SuccessWith2ndTry) {
ExpectedCalls expected_fs_calls(
{std::make_tuple("NewReadOnlyMemoryRegionFromFile",
errors::Unavailable("Something is wrong")),
std::make_tuple("NewReadOnlyMemoryRegionFromFile", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::unique_ptr<ReadOnlyMemoryRegion> result;
TF_EXPECT_OK(
fs.NewReadOnlyMemoryRegionFromFile("filename.txt", nullptr, &result));
}
TEST(RetryingFileSystemTest, NewReadOnlyMemoryRegionFromFile_AllRetriesFailed) {
ExpectedCalls expected_fs_calls =
CreateRetriableErrors("NewReadOnlyMemoryRegionFromFile", 11);
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::unique_ptr<ReadOnlyMemoryRegion> result;
const auto& status =
fs.NewReadOnlyMemoryRegionFromFile("filename.txt", nullptr, &result);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, GetChildren_SuccessWith2ndTry) {
ExpectedCalls expected_fs_calls(
{std::make_tuple("GetChildren",
errors::Unavailable("Something is wrong")),
std::make_tuple("GetChildren", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::vector<string> result;
  TF_EXPECT_OK(fs.GetChildren("gs://path", nullptr, &result));
}
TEST(RetryingFileSystemTest, GetChildren_AllRetriesFailed) {
ExpectedCalls expected_fs_calls = CreateRetriableErrors("GetChildren", 11);
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::vector<string> result;
  const auto& status = fs.GetChildren("gs://path", nullptr, &result);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, GetMatchingPaths_SuccessWith2ndTry) {
ExpectedCalls expected_fs_calls(
{std::make_tuple("GetMatchingPaths",
errors::Unavailable("Something is wrong")),
std::make_tuple("GetMatchingPaths", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::vector<string> result;
  TF_EXPECT_OK(fs.GetMatchingPaths("gs://path/dir", nullptr, &result));
}
TEST(RetryingFileSystemTest, GetMatchingPaths_AllRetriesFailed) {
ExpectedCalls expected_fs_calls =
CreateRetriableErrors("GetMatchingPaths", 11);
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
std::vector<string> result;
  const auto& status = fs.GetMatchingPaths("gs://path/dir", nullptr, &result);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, DeleteFile_SuccessWith2ndTry) {
ExpectedCalls expected_fs_calls(
{std::make_tuple("DeleteFile", errors::Unavailable("Something is wrong")),
std::make_tuple("DeleteFile", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
  TF_EXPECT_OK(fs.DeleteFile("gs://path/file.txt", nullptr));
}
TEST(RetryingFileSystemTest, DeleteFile_AllRetriesFailed) {
ExpectedCalls expected_fs_calls = CreateRetriableErrors("DeleteFile", 11);
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
  const auto& status = fs.DeleteFile("gs://path/file.txt", nullptr);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, CreateDir_SuccessWith2ndTry) {
ExpectedCalls expected_fs_calls(
{std::make_tuple("CreateDir", errors::Unavailable("Something is wrong")),
std::make_tuple("CreateDir", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
  TF_EXPECT_OK(fs.CreateDir("gs://path/newdir", nullptr));
}
TEST(RetryingFileSystemTest, CreateDir_AllRetriesFailed) {
ExpectedCalls expected_fs_calls = CreateRetriableErrors("CreateDir", 11);
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
  const auto& status = fs.CreateDir("gs://path/newdir", nullptr);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, DeleteDir_SuccessWith2ndTry) {
ExpectedCalls expected_fs_calls(
{std::make_tuple("DeleteDir", errors::Unavailable("Something is wrong")),
std::make_tuple("DeleteDir", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
  TF_EXPECT_OK(fs.DeleteDir("gs://path/dir", nullptr));
}
TEST(RetryingFileSystemTest, DeleteDir_AllRetriesFailed) {
ExpectedCalls expected_fs_calls = CreateRetriableErrors("DeleteDir", 11);
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
  const auto& status = fs.DeleteDir("gs://path/dir", nullptr);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, GetFileSize_SuccessWith2ndTry) {
ExpectedCalls expected_fs_calls(
{std::make_tuple("GetFileSize",
errors::Unavailable("Something is wrong")),
std::make_tuple("GetFileSize", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
uint64 size;
  TF_EXPECT_OK(fs.GetFileSize("gs://path/file.txt", nullptr, &size));
}
TEST(RetryingFileSystemTest, GetFileSize_AllRetriesFailed) {
ExpectedCalls expected_fs_calls = CreateRetriableErrors("GetFileSize", 11);
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
uint64 size;
  const auto& status = fs.GetFileSize("gs://path/file.txt", nullptr, &size);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, RenameFile_SuccessWith2ndTry) {
ExpectedCalls expected_fs_calls(
{std::make_tuple("RenameFile", errors::Unavailable("Something is wrong")),
std::make_tuple("RenameFile", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
TF_EXPECT_OK(fs.RenameFile("old_name", "new_name", nullptr));
}
TEST(RetryingFileSystemTest, RenameFile_AllRetriesFailed) {
ExpectedCalls expected_fs_calls = CreateRetriableErrors("RenameFile", 11);
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
const auto& status = fs.RenameFile("old_name", "new_name", nullptr);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, Stat_SuccessWith2ndTry) {
ExpectedCalls expected_fs_calls(
{std::make_tuple("Stat", errors::Unavailable("Something is wrong")),
std::make_tuple("Stat", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
FileStatistics stat;
TF_EXPECT_OK(fs.Stat("file_name", nullptr, &stat));
}
TEST(RetryingFileSystemTest, Stat_AllRetriesFailed) {
ExpectedCalls expected_fs_calls = CreateRetriableErrors("Stat", 11);
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
FileStatistics stat;
const auto& status = fs.Stat("file_name", nullptr, &stat);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, FileExists_AllRetriesFailed) {
ExpectedCalls expected_fs_calls = CreateRetriableErrors("FileExists", 11);
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
const auto& status = fs.FileExists("file_name", nullptr);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, FileExists_SuccessWith2ndTry) {
ExpectedCalls expected_fs_calls(
{std::make_tuple("FileExists", errors::Unavailable("Something is wrong")),
std::make_tuple("FileExists", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
  TF_EXPECT_OK(fs.FileExists("gs://path/dir", nullptr));
}
TEST(RetryingFileSystemTest, IsDirectory_SuccessWith2ndTry) {
ExpectedCalls expected_fs_calls(
{std::make_tuple("IsDirectory",
errors::Unavailable("Something is wrong")),
std::make_tuple("IsDirectory", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
  TF_EXPECT_OK(fs.IsDirectory("gs://path/dir", nullptr));
}
TEST(RetryingFileSystemTest, IsDirectory_AllRetriesFailed) {
ExpectedCalls expected_fs_calls = CreateRetriableErrors("IsDirectory", 11);
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
  const auto& status = fs.IsDirectory("gs://path/dir", nullptr);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, DeleteRecursively_SuccessWith2ndTry) {
ExpectedCalls expected_fs_calls(
{std::make_tuple("DeleteRecursively",
errors::Unavailable("Something is wrong")),
std::make_tuple("DeleteRecursively", absl::OkStatus())});
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
int64_t undeleted_files, undeleted_dirs;
  TF_EXPECT_OK(fs.DeleteRecursively("gs://path/dir", nullptr, &undeleted_files,
                                    &undeleted_dirs));
}
TEST(RetryingFileSystemTest, DeleteRecursively_AllRetriesFailed) {
ExpectedCalls expected_fs_calls =
CreateRetriableErrors("DeleteRecursively", 11);
std::unique_ptr<MockFileSystem> base_fs(
new MockFileSystem(expected_fs_calls));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
int64_t undeleted_files, undeleted_dirs;
  const auto& status = fs.DeleteRecursively("gs://path/dir", nullptr,
                                            &undeleted_files, &undeleted_dirs);
EXPECT_TRUE(absl::StrContains(status.message(), "Retriable error #10"))
<< status;
}
TEST(RetryingFileSystemTest, FlushCaches) {
ExpectedCalls none;
bool flushed = false;
std::unique_ptr<MockFileSystem> base_fs(new MockFileSystem(none, &flushed));
RetryingFileSystem<MockFileSystem> fs(
std::move(base_fs), RetryConfig(0 ));
fs.FlushCaches(nullptr);
EXPECT_TRUE(flushed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/retrying_file_system.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/retrying_file_system_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a8dc82e0-6cfb-4171-9acc-706391937f7d | cpp | tensorflow/tensorflow | refcount | tensorflow/core/lib/core/refcount.h | third_party/xla/third_party/tsl/tsl/platform/refcount_test.cc | #ifndef TENSORFLOW_CORE_LIB_CORE_REFCOUNT_H_
#define TENSORFLOW_CORE_LIB_CORE_REFCOUNT_H_
#include "tensorflow/core/platform/refcount.h"
#endif | #include "tsl/platform/refcount.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
namespace core {
namespace {
class RefTest : public ::testing::Test {
public:
RefTest() {
constructed_ = 0;
destroyed_ = 0;
}
static int constructed_;
static int destroyed_;
};
int RefTest::constructed_;
int RefTest::destroyed_;
class MyRef : public RefCounted {
public:
MyRef() { RefTest::constructed_++; }
~MyRef() override { RefTest::destroyed_++; }
};
TEST_F(RefTest, New) {
MyRef* ref = new MyRef;
ASSERT_EQ(1, constructed_);
ASSERT_EQ(0, destroyed_);
ref->Unref();
ASSERT_EQ(1, constructed_);
ASSERT_EQ(1, destroyed_);
}
TEST_F(RefTest, RefUnref) {
MyRef* ref = new MyRef;
ASSERT_EQ(1, constructed_);
ASSERT_EQ(0, destroyed_);
ref->Ref();
ASSERT_EQ(0, destroyed_);
ref->Unref();
ASSERT_EQ(0, destroyed_);
ref->Unref();
ASSERT_EQ(1, destroyed_);
}
TEST_F(RefTest, RefCountOne) {
MyRef* ref = new MyRef;
ASSERT_TRUE(ref->RefCountIsOne());
ref->Unref();
}
TEST_F(RefTest, RefCountNotOne) {
MyRef* ref = new MyRef;
ref->Ref();
ASSERT_FALSE(ref->RefCountIsOne());
ref->Unref();
ref->Unref();
}
TEST_F(RefTest, ConstRefUnref) {
const MyRef* cref = new MyRef;
ASSERT_EQ(1, constructed_);
ASSERT_EQ(0, destroyed_);
cref->Ref();
ASSERT_EQ(0, destroyed_);
cref->Unref();
ASSERT_EQ(0, destroyed_);
cref->Unref();
ASSERT_EQ(1, destroyed_);
}
TEST_F(RefTest, ReturnOfUnref) {
MyRef* ref = new MyRef;
ref->Ref();
EXPECT_FALSE(ref->Unref());
EXPECT_TRUE(ref->Unref());
}
TEST_F(RefTest, ScopedUnref) {
{ ScopedUnref unref(new MyRef); }
EXPECT_EQ(destroyed_, 1);
}
TEST_F(RefTest, ScopedUnref_Nullptr) {
{ ScopedUnref unref(nullptr); }
EXPECT_EQ(destroyed_, 0);
}
TEST_F(RefTest, RefCountPtr) {
const RefCountPtr<MyRef> cref = RefCountPtr<MyRef>(new MyRef);
ASSERT_TRUE(cref.get() != nullptr);
ASSERT_EQ(cref->RefCount(), 1);
{
const RefCountPtr<MyRef> cref2 = cref.GetNewRef();
ASSERT_EQ(cref->RefCount(), 2);
}
ASSERT_EQ(cref->RefCount(), 1);
}
class ObjType : public WeakRefCounted {
public:
ObjType() : ObjType(unused_dtor_called_) {}
explicit ObjType(int& dtor_called) : dtor_called_(dtor_called) {}
~ObjType() override { dtor_called_++; }
int& dtor_called_;
static int unused_dtor_called_;
};
int ObjType::unused_dtor_called_ = 0;
TEST(WeakPtr, SingleThread) {
auto obj = new ObjType();
WeakPtr<ObjType> weakptr(obj);
ASSERT_TRUE(obj->RefCountIsOne());
EXPECT_EQ(obj->WeakRefCount(), 1);
EXPECT_NE(weakptr.GetNewRef(), nullptr);
obj->Unref();
EXPECT_EQ(weakptr.GetNewRef(), nullptr);
}
TEST(WeakPtr, MultiThreadedWeakRef) {
std::atomic<int> hit_destructed{0};
auto env = Env::Default();
for (int i = 0; i < 100; i++) {
auto obj = new ObjType();
WeakPtr<ObjType> weakptr(obj);
bool obj_destructed = false;
EXPECT_EQ(obj->WeakRefCount(), 1);
auto fn = [&]() {
auto ref = weakptr.GetNewRef();
if (ref != nullptr) {
EXPECT_EQ(ref.get(), obj);
EXPECT_EQ(ref->WeakRefCount(), 1);
EXPECT_GE(ref->RefCount(), 1);
} else {
hit_destructed++;
EXPECT_TRUE(obj_destructed);
}
};
auto t1 = env->StartThread(ThreadOptions{}, "thread-1", fn);
auto t2 = env->StartThread(ThreadOptions{}, "thread-2", fn);
env->SleepForMicroseconds(10);
obj_destructed = true;
obj->Unref();
delete t1;
delete t2;
EXPECT_EQ(weakptr.GetNewRef(), nullptr);
}
if (hit_destructed == 0) {
LOG(WARNING) << "The destructed weakref test branch is not exercised.";
}
if (hit_destructed == 200) {
LOG(WARNING) << "The valid weakref test branch is not exercised.";
}
}
TEST(WeakPtr, NotifyCalled) {
auto obj = new ObjType();
int num_calls1 = 0;
int num_calls2 = 0;
auto notify_fn1 = [&num_calls1]() { num_calls1++; };
auto notify_fn2 = [&num_calls2]() { num_calls2++; };
WeakPtr<ObjType> weakptr1(obj, notify_fn1);
WeakPtr<ObjType> weakptr2(obj, notify_fn2);
ASSERT_TRUE(obj->RefCountIsOne());
EXPECT_EQ(obj->WeakRefCount(), 2);
EXPECT_NE(weakptr1.GetNewRef(), nullptr);
EXPECT_NE(weakptr2.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 0);
EXPECT_EQ(num_calls2, 0);
obj->Unref();
EXPECT_EQ(weakptr1.GetNewRef(), nullptr);
EXPECT_EQ(weakptr2.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 1);
EXPECT_EQ(num_calls2, 1);
}
TEST(WeakPtr, NotifyCalledBeforeDestructor) {
int dtor_called = 0;
auto obj = new ObjType(dtor_called);
int num_calls1 = 0;
auto notify_fn1 = [&num_calls1, &dtor_called]() {
num_calls1++;
EXPECT_EQ(dtor_called, 0);
};
WeakPtr<ObjType> weakptr1(obj, notify_fn1);
ASSERT_TRUE(obj->RefCountIsOne());
EXPECT_EQ(obj->WeakRefCount(), 1);
EXPECT_NE(weakptr1.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 0);
obj->Unref();
EXPECT_EQ(weakptr1.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 1);
EXPECT_EQ(dtor_called, 1);
}
TEST(WeakPtr, CopyTargetCalled) {
auto obj = new ObjType();
int num_calls1 = 0;
int num_calls2 = 0;
auto notify_fn1 = [&num_calls1]() { num_calls1++; };
auto notify_fn2 = [&num_calls2]() { num_calls2++; };
WeakPtr<ObjType> weakptr1(obj, notify_fn1);
WeakPtr<ObjType> weakptr2(obj, notify_fn2);
WeakPtr<ObjType> weakptr3(weakptr1);
weakptr2 = weakptr1;
ASSERT_TRUE(obj->RefCountIsOne());
EXPECT_EQ(obj->WeakRefCount(), 3);
EXPECT_NE(weakptr2.GetNewRef(), nullptr);
EXPECT_NE(weakptr3.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 0);
EXPECT_EQ(num_calls2, 0);
obj->Unref();
EXPECT_EQ(weakptr2.GetNewRef(), nullptr);
EXPECT_EQ(weakptr3.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 3);
EXPECT_EQ(num_calls2, 0);
}
TEST(WeakPtr, MoveTargetNotCalled) {
auto obj = new ObjType();
int num_calls1 = 0;
int num_calls2 = 0;
int num_calls3 = 0;
auto notify_fn1 = [&num_calls1]() { num_calls1++; };
auto notify_fn2 = [&num_calls2]() { num_calls2++; };
auto notify_fn3 = [&num_calls3]() { num_calls3++; };
WeakPtr<ObjType> weakptr1(obj, notify_fn1);
WeakPtr<ObjType> weakptr2(obj, notify_fn2);
WeakPtr<ObjType> weakptr3(WeakPtr<ObjType>(obj, notify_fn3));
weakptr2 = std::move(weakptr1);
ASSERT_TRUE(obj->RefCountIsOne());
EXPECT_EQ(obj->WeakRefCount(), 2);
EXPECT_NE(weakptr2.GetNewRef(), nullptr);
EXPECT_NE(weakptr3.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 0);
EXPECT_EQ(num_calls2, 0);
EXPECT_EQ(num_calls3, 0);
obj->Unref();
EXPECT_EQ(weakptr2.GetNewRef(), nullptr);
EXPECT_EQ(weakptr3.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 1);
EXPECT_EQ(num_calls2, 0);
EXPECT_EQ(num_calls3, 1);
}
TEST(WeakPtr, DestroyedNotifyNotCalled) {
auto obj = new ObjType();
int num_calls = 0;
auto notify_fn = [&num_calls]() { num_calls++; };
{ WeakPtr<ObjType> weakptr(obj, notify_fn); }
ASSERT_TRUE(obj->RefCountIsOne());
EXPECT_EQ(obj->WeakRefCount(), 0);
EXPECT_EQ(num_calls, 0);
obj->Unref();
EXPECT_EQ(num_calls, 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/refcount.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/refcount_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d9291186-f644-4803-84a1-9f17601d1838 | cpp | tensorflow/tensorflow | threadpool_async_executor | third_party/xla/third_party/tsl/tsl/platform/threadpool_async_executor.h | third_party/xla/third_party/tsl/tsl/platform/threadpool_async_executor_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_THREADPOOL_ASYNC_EXECUTOR_H_
#define TENSORFLOW_TSL_PLATFORM_THREADPOOL_ASYNC_EXECUTOR_H_
#include <utility>
#include "xla/tsl/concurrency/async_value.h"
#include "tsl/platform/threadpool.h"
namespace tsl::thread {
class ThreadPoolAsyncExecutor : public AsyncValue::Executor {
public:
explicit ThreadPoolAsyncExecutor(ThreadPool* thread_pool)
: thread_pool_(thread_pool) {}
void Execute(Task task) final {
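    // The task is moved onto the heap so that the copyable callback handed to
    // ThreadPool::Schedule (a std::function) can refer to it even though Task
    // itself is move-only; the allocation is released right after the task runs.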
auto* task_ptr = new Task(std::move(task));
thread_pool_->Schedule([task_ptr] {
(*task_ptr)();
delete task_ptr;
});
}
private:
ThreadPool* thread_pool_;
};
}
#endif | #include "tsl/platform/threadpool_async_executor.h"
#include "absl/synchronization/notification.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl::thread {
namespace {
TEST(ThreadPoolAsyncExecutorTest, ExecuteTasks) {
ThreadPool thread_pool(Env::Default(), "test", 4);
ThreadPoolAsyncExecutor executor(&thread_pool);
absl::Notification notification;
executor.Execute([&] { notification.Notify(); });
notification.WaitForNotification();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/threadpool_async_executor.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/threadpool_async_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5253ef1e-f139-4a56-b343-b128d7405c88 | cpp | tensorflow/tensorflow | fingerprint | tensorflow/core/platform/fingerprint.h | third_party/xla/third_party/tsl/tsl/platform/fingerprint_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_FINGERPRINT_H_
#define TENSORFLOW_CORE_PLATFORM_FINGERPRINT_H_
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/fingerprint.h"
namespace tensorflow {
using Fprint128 = tsl::Fprint128;
using Fprint128Hasher = tsl::Fprint128Hasher;
using tsl::Fingerprint128;
using tsl::Fingerprint32;
using tsl::Fingerprint64;
using tsl::FingerprintCat64;
}
#endif | #include "tsl/platform/fingerprint.h"
#include <unordered_set>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
TEST(Fingerprint64, IsForeverFrozen) {
EXPECT_EQ(15404698994557526151ULL, Fingerprint64("Hello"));
EXPECT_EQ(18308117990299812472ULL, Fingerprint64("World"));
}
TEST(Fingerprint128, IsForeverFrozen) {
{
const Fprint128 fingerprint = Fingerprint128("Hello");
EXPECT_EQ(1163506517679092766ULL, fingerprint.low64);
EXPECT_EQ(10829806600034513965ULL, fingerprint.high64);
}
{
const Fprint128 fingerprint = Fingerprint128("World");
EXPECT_EQ(14404540403896557767ULL, fingerprint.low64);
EXPECT_EQ(4859093245152058524ULL, fingerprint.high64);
}
}
TEST(Fingerprint128, Fprint128Hasher) {
const std::unordered_set<Fprint128, Fprint128Hasher> map = {{1, 2}, {3, 4}};
}
TEST(FingerprintCat64, IsForeverFrozen) {
EXPECT_EQ(16877292868973613377ULL,
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World")));
EXPECT_EQ(7158413233176775252ULL,
FingerprintCat64(Fingerprint64("World"), Fingerprint64("Hello")));
}
TEST(FingerprintCat64, Idempotence) {
const uint64_t orig =
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World"));
EXPECT_EQ(orig,
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World")));
EXPECT_NE(FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("Hi")),
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World")));
EXPECT_EQ(orig,
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/fingerprint.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/fingerprint_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dd9b6d0b-cfde-4480-9c14-d8bc39df53b2 | cpp | tensorflow/tensorflow | criticality | third_party/xla/third_party/tsl/tsl/platform/default/criticality.h | third_party/xla/third_party/tsl/tsl/platform/criticality_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_CRITICALITY_H_
#define TENSORFLOW_TSL_PLATFORM_DEFAULT_CRITICALITY_H_
namespace tsl {
namespace criticality {
inline Criticality GetCriticality() {
return Criticality::kCritical;
}
}
}
#endif | #include "tsl/platform/criticality.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace criticality {
TEST(CriticalityTest, Basic) {
EXPECT_EQ(GetCriticality(), Criticality::kCritical);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/default/criticality.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/criticality_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
68be2b50-dfd0-4196-b8b4-1bdb1ae68974 | cpp | tensorflow/tensorflow | integral_types | third_party/xla/third_party/tsl/tsl/platform/default/integral_types.h | third_party/xla/third_party/tsl/tsl/platform/integral_types_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
#define TENSORFLOW_TSL_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
#include <cstdint>
namespace tsl {
typedef signed char int8;
typedef short int16;
typedef int int32;
typedef ::std::int64_t int64;
typedef unsigned char uint8;
typedef unsigned short uint16;
typedef unsigned int uint32;
typedef std::uint64_t uint64;
}
#endif | #include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
TEST(IntegralTypes, Basic) {
EXPECT_EQ(1, sizeof(int8));
EXPECT_EQ(2, sizeof(int16));
EXPECT_EQ(4, sizeof(int32));
EXPECT_EQ(8, sizeof(int64_t));
EXPECT_EQ(1, sizeof(uint8));
EXPECT_EQ(2, sizeof(uint16));
EXPECT_EQ(4, sizeof(uint32));
EXPECT_EQ(8, sizeof(uint64));
}
TEST(IntegralTypes, MinAndMaxConstants) {
EXPECT_EQ(static_cast<uint8>(kint8min), static_cast<uint8>(kint8max) + 1);
EXPECT_EQ(static_cast<uint16>(kint16min), static_cast<uint16>(kint16max) + 1);
EXPECT_EQ(static_cast<uint32>(kint32min), static_cast<uint32>(kint32max) + 1);
EXPECT_EQ(static_cast<uint64>(kint64min), static_cast<uint64>(kint64max) + 1);
EXPECT_EQ(0, static_cast<uint8>(kuint8max + 1));
EXPECT_EQ(0, static_cast<uint16>(kuint16max + 1));
EXPECT_EQ(0, static_cast<uint32>(kuint32max + 1));
EXPECT_EQ(0, static_cast<uint64>(kuint64max + 1));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/default/integral_types.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/integral_types_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
67b5dcbe-b63d-4c16-852c-8b8844a3576f | cpp | tensorflow/tensorflow | expiring_lru_cache | tensorflow/c/experimental/filesystem/plugins/gcs/expiring_lru_cache.h | tensorflow/c/experimental/filesystem/plugins/gcs/expiring_lru_cache_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_EXPIRING_LRU_CACHE_H_
#define TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_EXPIRING_LRU_CACHE_H_
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <string>
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/c/env.h"
#include "tensorflow/c/tf_status.h"
namespace tf_gcs_filesystem {
template <typename T>
class ExpiringLRUCache {
public:
ExpiringLRUCache(uint64_t max_age, size_t max_entries,
std::function<uint64_t()> timer_seconds = TF_NowSeconds)
: max_age_(max_age),
max_entries_(max_entries),
timer_seconds_(timer_seconds) {}
void Insert(const std::string& key, const T& value) {
if (max_age_ == 0) {
return;
}
absl::MutexLock lock(&mu_);
InsertLocked(key, value);
}
bool Delete(const std::string& key) {
absl::MutexLock lock(&mu_);
return DeleteLocked(key);
}
bool Lookup(const std::string& key, T* value) {
if (max_age_ == 0) {
return false;
}
absl::MutexLock lock(&mu_);
return LookupLocked(key, value);
}
typedef std::function<void(const std::string&, T*, TF_Status*)> ComputeFunc;
void LookupOrCompute(const std::string& key, T* value,
const ComputeFunc& compute_func, TF_Status* status) {
if (max_age_ == 0) {
return compute_func(key, value, status);
}
absl::MutexLock lock(&mu_);
if (LookupLocked(key, value)) {
return TF_SetStatus(status, TF_OK, "");
}
compute_func(key, value, status);
if (TF_GetCode(status) == TF_OK) {
InsertLocked(key, *value);
}
}
void Clear() {
absl::MutexLock lock(&mu_);
cache_.clear();
lru_list_.clear();
}
uint64_t max_age() const { return max_age_; }
size_t max_entries() const { return max_entries_; }
private:
struct Entry {
uint64_t timestamp;
T value;
std::list<std::string>::iterator lru_iterator;
};
bool LookupLocked(const std::string& key, T* value)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
auto it = cache_.find(key);
if (it == cache_.end()) {
return false;
}
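    // Unlink the entry from the LRU list here; it is either dropped below if it
    // has expired, or re-linked at the front on a successful lookup.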
lru_list_.erase(it->second.lru_iterator);
if (timer_seconds_() - it->second.timestamp > max_age_) {
cache_.erase(it);
return false;
}
*value = it->second.value;
lru_list_.push_front(it->first);
it->second.lru_iterator = lru_list_.begin();
return true;
}
void InsertLocked(const std::string& key, const T& value)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
lru_list_.push_front(key);
Entry entry{timer_seconds_(), value, lru_list_.begin()};
auto insert = cache_.insert(std::make_pair(key, entry));
if (!insert.second) {
lru_list_.erase(insert.first->second.lru_iterator);
insert.first->second = entry;
} else if (max_entries_ > 0 && cache_.size() > max_entries_) {
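      // Over capacity: evict the least recently used entry (back of the list).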
cache_.erase(lru_list_.back());
lru_list_.pop_back();
}
}
bool DeleteLocked(const std::string& key) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
auto it = cache_.find(key);
if (it == cache_.end()) {
return false;
}
lru_list_.erase(it->second.lru_iterator);
cache_.erase(it);
return true;
}
const uint64_t max_age_;
const size_t max_entries_;
std::function<uint64_t()> timer_seconds_;
absl::Mutex mu_;
std::map<std::string, Entry> cache_ ABSL_GUARDED_BY(mu_);
std::list<std::string> lru_list_ ABSL_GUARDED_BY(mu_);
};
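// Illustrative usage sketch (the key, sizes, and TTL below are hypothetical,
// not taken from any caller): LookupOrCompute returns a cached value when one
// is present and unexpired, and otherwise runs the compute function and caches
// its result.
//
//   ExpiringLRUCache<uint64_t> stat_cache(/*max_age=*/60, /*max_entries=*/1024);
//   uint64_t object_size = 0;
//   TF_Status* status = TF_NewStatus();
//   stat_cache.LookupOrCompute(
//       "gs://bucket/object", &object_size,
//       [](const std::string& key, uint64_t* size, TF_Status* s) {
//         *size = 42;  // e.g. the result of a real stat() RPC
//         TF_SetStatus(s, TF_OK, "");
//       },
//       status);
//   TF_DeleteStatus(status);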
}
#endif | #include "tensorflow/c/experimental/filesystem/plugins/gcs/expiring_lru_cache.h"
#include <memory>
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_internal.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/cloud/now_seconds_env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(ExpiringLRUCacheTest, MaxAge) {
const string key = "a";
std::unique_ptr<NowSecondsEnv> env(new NowSecondsEnv);
tf_gcs_filesystem::ExpiringLRUCache<int> cache(
1, 0, [&env]() { return env->NowSeconds(); });
env->SetNowSeconds(1);
cache.Insert(key, 41);
env->SetNowSeconds(2);
cache.Insert(key, 42);
env->SetNowSeconds(3);
int value = 0;
EXPECT_TRUE(cache.Lookup(key, &value));
EXPECT_EQ(value, 42);
env->SetNowSeconds(4);
EXPECT_FALSE(cache.Lookup(key, &value));
cache.Insert(key, 43);
EXPECT_TRUE(cache.Lookup(key, &value));
EXPECT_EQ(value, 43);
env->SetNowSeconds(5);
value = 0;
EXPECT_TRUE(cache.Lookup(key, &value));
EXPECT_EQ(value, 43);
env->SetNowSeconds(6);
EXPECT_FALSE(cache.Lookup(key, &value));
}
TEST(ExpiringLRUCacheTest, MaxEntries) {
tf_gcs_filesystem::ExpiringLRUCache<int> cache1(0, 4);
cache1.Insert("a", 1);
int value = 0;
EXPECT_FALSE(cache1.Lookup("a", &value));
tf_gcs_filesystem::ExpiringLRUCache<int> cache2(1, 4);
cache2.Insert("a", 1);
cache2.Insert("b", 2);
cache2.Insert("c", 3);
cache2.Insert("d", 4);
EXPECT_TRUE(cache2.Lookup("a", &value));
EXPECT_EQ(value, 1);
EXPECT_TRUE(cache2.Lookup("b", &value));
EXPECT_EQ(value, 2);
EXPECT_TRUE(cache2.Lookup("c", &value));
EXPECT_EQ(value, 3);
EXPECT_TRUE(cache2.Lookup("d", &value));
EXPECT_EQ(value, 4);
cache2.Insert("e", 5);
EXPECT_FALSE(cache2.Lookup("a", &value));
EXPECT_TRUE(cache2.Lookup("b", &value));
EXPECT_EQ(value, 2);
EXPECT_TRUE(cache2.Lookup("c", &value));
EXPECT_EQ(value, 3);
EXPECT_TRUE(cache2.Lookup("d", &value));
EXPECT_EQ(value, 4);
EXPECT_TRUE(cache2.Lookup("e", &value));
EXPECT_EQ(value, 5);
}
TEST(ExpiringLRUCacheTest, LookupOrCompute) {
uint64 num_compute_calls = 0;
tf_gcs_filesystem::ExpiringLRUCache<int>::ComputeFunc compute_func =
[&num_compute_calls](const string& key, int* value, TF_Status* status) {
*value = num_compute_calls;
num_compute_calls++;
return TF_SetStatus(status, TF_OK, "");
};
tf_gcs_filesystem::ExpiringLRUCache<int> cache1(0, 4);
int value = -1;
TF_Status status;
cache1.LookupOrCompute("a", &value, compute_func, &status);
TF_EXPECT_OK(status.status);
EXPECT_EQ(value, 0);
EXPECT_EQ(num_compute_calls, 1);
cache1.LookupOrCompute("a", &value, compute_func, &status);
TF_EXPECT_OK(status.status);
EXPECT_EQ(value, 1);
EXPECT_EQ(num_compute_calls, 2);
tf_gcs_filesystem::ExpiringLRUCache<int> cache2(2, 4);
num_compute_calls = 0;
value = -1;
cache2.LookupOrCompute("a", &value, compute_func, &status);
TF_EXPECT_OK(status.status);
EXPECT_EQ(value, 0);
EXPECT_EQ(num_compute_calls, 1);
cache2.LookupOrCompute("a", &value, compute_func, &status);
TF_EXPECT_OK(status.status);
EXPECT_EQ(value, 0);
EXPECT_EQ(num_compute_calls, 1);
cache2.LookupOrCompute("b", &value, compute_func, &status);
TF_EXPECT_OK(status.status);
EXPECT_EQ(value, 1);
EXPECT_EQ(num_compute_calls, 2);
cache2.LookupOrCompute("c", &value, compute_func, &status);
TF_EXPECT_OK(status.status);
EXPECT_EQ(value, 2);
EXPECT_EQ(num_compute_calls, 3);
cache2.LookupOrCompute("d", &value, compute_func, &status);
TF_EXPECT_OK(status.status);
EXPECT_EQ(value, 3);
EXPECT_EQ(num_compute_calls, 4);
cache2.LookupOrCompute("e", &value, compute_func, &status);
TF_EXPECT_OK(status.status);
EXPECT_EQ(value, 4);
EXPECT_EQ(num_compute_calls, 5);
cache2.LookupOrCompute("b", &value, compute_func, &status);
TF_EXPECT_OK(status.status);
EXPECT_EQ(value, 1);
EXPECT_EQ(num_compute_calls, 5);
cache2.LookupOrCompute("c", &value, compute_func, &status);
TF_EXPECT_OK(status.status);
EXPECT_EQ(value, 2);
EXPECT_EQ(num_compute_calls, 5);
cache2.LookupOrCompute("d", &value, compute_func, &status);
TF_EXPECT_OK(status.status);
EXPECT_EQ(value, 3);
EXPECT_EQ(num_compute_calls, 5);
cache2.LookupOrCompute("a", &value, compute_func, &status);
TF_EXPECT_OK(status.status);
EXPECT_EQ(value, 5);
EXPECT_EQ(num_compute_calls, 6);
}
TEST(ExpiringLRUCacheTest, Clear) {
tf_gcs_filesystem::ExpiringLRUCache<int> cache(1, 4);
cache.Insert("a", 1);
cache.Insert("b", 2);
cache.Insert("c", 3);
cache.Insert("d", 4);
int value = 0;
EXPECT_TRUE(cache.Lookup("a", &value));
EXPECT_EQ(value, 1);
EXPECT_TRUE(cache.Lookup("b", &value));
EXPECT_EQ(value, 2);
EXPECT_TRUE(cache.Lookup("c", &value));
EXPECT_EQ(value, 3);
EXPECT_TRUE(cache.Lookup("d", &value));
EXPECT_EQ(value, 4);
cache.Clear();
EXPECT_FALSE(cache.Lookup("a", &value));
EXPECT_FALSE(cache.Lookup("b", &value));
EXPECT_FALSE(cache.Lookup("c", &value));
EXPECT_FALSE(cache.Lookup("d", &value));
}
TEST(ExpiringLRUCacheTest, Delete) {
tf_gcs_filesystem::ExpiringLRUCache<int> cache(1, 4);
cache.Insert("a", 1);
int value = 0;
EXPECT_TRUE(cache.Lookup("a", &value));
EXPECT_EQ(value, 1);
EXPECT_TRUE(cache.Delete("a"));
EXPECT_FALSE(cache.Lookup("a", &value));
EXPECT_FALSE(cache.Delete("a"));
EXPECT_FALSE(cache.Lookup("a", &value));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/plugins/gcs/expiring_lru_cache.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/plugins/gcs/expiring_lru_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dc4dd8f4-6c45-415b-b3d5-1c28a442ce4a | cpp | tensorflow/tensorflow | scoped_annotation | tensorflow/core/profiler/lib/scoped_annotation.h | third_party/xla/third_party/tsl/tsl/profiler/lib/scoped_annotation_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_LIB_SCOPED_ANNOTATION_H_
#define TENSORFLOW_CORE_PROFILER_LIB_SCOPED_ANNOTATION_H_
#include <stddef.h>
#include <atomic>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/profiler/lib/scoped_annotation.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "xla/tsl/profiler/backends/cpu/annotation_stack.h"
#endif
#ifndef ABSL_DEPRECATE_AND_INLINE
#define ABSL_DEPRECATE_AND_INLINE()
#endif
namespace tensorflow {
namespace profiler {
using ScopedAnnotation ABSL_DEPRECATE_AND_INLINE() =
tsl::profiler::ScopedAnnotation;
}
}
#endif | #include "tsl/profiler/lib/scoped_annotation.h"
#include <string>
#include "absl/strings/str_cat.h"
#include "xla/tsl/profiler/backends/cpu/annotation_stack.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace profiler {
namespace {
TEST(ScopedAnnotation, Simple) {
{
ScopedAnnotation trace("blah");
EXPECT_EQ(AnnotationStack::Get(), "");
}
{
AnnotationStack::Enable(true);
ScopedAnnotation trace("blah");
EXPECT_EQ(AnnotationStack::Get(), "blah");
AnnotationStack::Enable(false);
}
{
AnnotationStack::Enable(true);
ScopedAnnotation outer("foo");
ScopedAnnotation inner("bar");
EXPECT_EQ(AnnotationStack::Get(), "foo::bar");
AnnotationStack::Enable(false);
}
{
AnnotationStack::Enable(true);
PushAnnotation("foo");
PushAnnotation("bar");
EXPECT_EQ(AnnotationStack::Get(), "foo::bar");
PopAnnotation();
PopAnnotation();
AnnotationStack::Enable(false);
}
EXPECT_EQ(AnnotationStack::Get(), "");
}
std::string GenerateRandomString(int length) {
return std::string(length, 'a');
}
void BM_ScopedAnnotationDisabled(::testing::benchmark::State& state) {
const int annotation_size = state.range(0);
std::string annotation = GenerateRandomString(annotation_size);
for (auto s : state) {
ScopedAnnotation trace(annotation);
}
}
BENCHMARK(BM_ScopedAnnotationDisabled)->Arg(8)->Arg(32)->Arg(128);
void BM_ScopedAnnotationEnabled(::testing::benchmark::State& state) {
const int annotation_size = state.range(0);
std::string annotation = GenerateRandomString(annotation_size);
AnnotationStack::Enable(true);
for (auto s : state) {
ScopedAnnotation trace(annotation);
}
AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled)->Arg(8)->Arg(32)->Arg(128);
void BM_ScopedAnnotationEnabled_Nested(::testing::benchmark::State& state) {
const int annotation_size = state.range(0);
std::string annotation = GenerateRandomString(annotation_size);
AnnotationStack::Enable(true);
for (auto s : state) {
ScopedAnnotation trace(annotation);
{ ScopedAnnotation trace(annotation); }
}
AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled_Nested)->Arg(8)->Arg(32)->Arg(128);
void BM_ScopedAnnotationEnabled_Adhoc(::testing::benchmark::State& state) {
AnnotationStack::Enable(true);
int i = 0;
for (auto s : state) {
ScopedAnnotation trace(absl::StrCat(i, "-", i * i));
++i;
}
AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc);
void BM_ScopedAnnotationDisabled_Lambda(::testing::benchmark::State& state) {
int i = 0;
for (auto s : state) {
ScopedAnnotation trace([&]() { return absl::StrCat(i, "-", i * i); });
++i;
}
}
BENCHMARK(BM_ScopedAnnotationDisabled_Lambda);
void BM_ScopedAnnotationEnabled_Adhoc_Lambda(
::testing::benchmark::State& state) {
AnnotationStack::Enable(true);
int i = 0;
for (auto s : state) {
ScopedAnnotation trace([&]() { return absl::StrCat(i, "-", i * i); });
++i;
}
AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc_Lambda);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/lib/scoped_annotation.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/profiler/lib/scoped_annotation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
51393e4d-12b3-4607-9e39-c7ef2d2956e3 | cpp | tensorflow/tensorflow | traceme_encode | tensorflow/core/profiler/lib/traceme_encode.h | third_party/xla/third_party/tsl/tsl/profiler/lib/traceme_encode_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_LIB_TRACEME_ENCODE_H_
#define TENSORFLOW_CORE_PROFILER_LIB_TRACEME_ENCODE_H_
#include <string.h>
#include <initializer_list>
#include <string>
#include <utility>
#include "absl/base/macros.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tsl/profiler/lib/traceme_encode.h"
#ifndef ABSL_DEPRECATE_AND_INLINE
#define ABSL_DEPRECATE_AND_INLINE()
#endif
namespace tensorflow {
namespace profiler {
using TraceMeArg ABSL_DEPRECATE_AND_INLINE() =
tsl::profiler::TraceMeArg;
ABSL_DEPRECATE_AND_INLINE()
inline std::string TraceMeEncode(
std::string name, std::initializer_list<tsl::profiler::TraceMeArg> args) {
return tsl::profiler::TraceMeEncode(std::move(name), args);
}
ABSL_DEPRECATE_AND_INLINE()
inline std::string TraceMeEncode(
absl::string_view name,
std::initializer_list<tsl::profiler::TraceMeArg> args) {
return tsl::profiler::TraceMeEncode(name, args);
}
ABSL_DEPRECATE_AND_INLINE()
inline std::string TraceMeEncode(
const char* name, std::initializer_list<tsl::profiler::TraceMeArg> args) {
return tsl::profiler::TraceMeEncode(name, args);
}
ABSL_DEPRECATE_AND_INLINE()
inline std::string TraceMeEncode(
std::initializer_list<tsl::profiler::TraceMeArg> args) {
return tsl::profiler::TraceMeEncode(args);
}
ABSL_DEPRECATE_AND_INLINE()
inline std::string TraceMeOp(absl::string_view op_name,
absl::string_view op_type) {
return tsl::profiler::TraceMeOp(op_name, op_type);
}
ABSL_DEPRECATE_AND_INLINE()
inline std::string TraceMeOp(const char* op_name, const char* op_type) {
return tsl::profiler::TraceMeOp(op_name, op_type);
}
ABSL_DEPRECATE_AND_INLINE()
inline std::string TraceMeOp(std::string&& op_name, absl::string_view op_type) {
return tsl::profiler::TraceMeOp(op_name, op_type);
}
ABSL_DEPRECATE_AND_INLINE()
inline std::string TraceMeOpOverride(absl::string_view op_name,
absl::string_view op_type) {
return tsl::profiler::TraceMeOpOverride(op_name, op_type);
}
ABSL_DEPRECATE_AND_INLINE()
inline std::string TraceMeOpOverride(const char* op_name, const char* op_type) {
return tsl::profiler::TraceMeOpOverride(op_name, op_type);
}
}
}
#endif | #include "tsl/profiler/lib/traceme_encode.h"
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace profiler {
namespace {
TEST(TraceMeEncodeTest, NoArgTest) {
EXPECT_EQ(TraceMeEncode("Hello!", {}), "Hello!");
}
TEST(TraceMeEncodeTest, OneArgTest) {
EXPECT_EQ(TraceMeEncode("Hello", {{"context", "World"}}),
"Hello#context=World#");
}
TEST(TraceMeEncodeTest, TwoArgsTest) {
EXPECT_EQ(TraceMeEncode("Hello", {{"context", "World"}, {"request_id", 42}}),
"Hello#context=World,request_id=42#");
}
TEST(TraceMeEncodeTest, ThreeArgsTest) {
EXPECT_EQ(TraceMeEncode("Hello", {{"context", "World"},
{"request_id", 42},
{"addr", absl::Hex(0xdeadbeef)}}),
"Hello#context=World,request_id=42,addr=deadbeef#");
}
#if !defined(PLATFORM_WINDOWS)
TEST(TraceMeEncodeTest, TemporaryStringTest) {
EXPECT_EQ(TraceMeEncode("Hello", {{std::string("context"),
absl::StrCat("World:", 2020)}}),
"Hello#context=World:2020#");
}
#endif
#if defined(PLATFORM_GOOGLE)
struct Point {
template <typename Sink>
friend void AbslStringify(Sink& sink, const Point& p) {
absl::Format(&sink, "(%d, %d)", p.x, p.y);
}
int x;
int y;
};
TEST(TraceMeEncodeTest, AbslStringifyTest) {
EXPECT_EQ(TraceMeEncode("Plot", {{"point", Point{10, 20}}}),
"Plot#point=(10, 20)#");
}
#endif
TEST(TraceMeEncodeTest, NoNameTest) {
EXPECT_EQ(TraceMeEncode({{"context", "World"}, {"request_id", 42}}),
"#context=World,request_id=42#");
}
}
void BM_TraceMeEncode(::testing::benchmark::State& state) {
for (auto s : state) {
TraceMeEncode(
"MyTestEvent",
{{"Lorem ipsum dolor sit amet", 1},
{"consectetur adipiscing elit", 2},
{"sed do eiusmod tempor incididunt", 3.52},
{"ut labore et dolore magna aliqua", "Ut enim ad minim veniam"},
{"quis nostrud exercitation ullamco", "laboris nisi ut aliquip ex"},
{"ea commodo consequat.", 11111.1111},
{"Duis aute", 1234567890},
{"irure dolor in", " reprehenderit in voluptate"},
{"velit esse cillum dolore", "eu fugiat nulla pariatur."},
{"Excepteur sint", "occaecat cupidatat non proident, sunt in"},
{"culpa qui officia", "deserunt mollit anim id est laborum."}});
}
}
BENCHMARK(BM_TraceMeEncode);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/lib/traceme_encode.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/profiler/lib/traceme_encode_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8745af98-1afa-496d-808c-672e91d24dea | cpp | tensorflow/tensorflow | dtensor_operation | tensorflow/dtensor/cc/dtensor_operation.h | tensorflow/dtensor/tests/dtensor_operation_test.cc | #ifndef TENSORFLOW_DTENSOR_CC_DTENSOR_OPERATION_H_
#define TENSORFLOW_DTENSOR_CC_DTENSOR_OPERATION_H_
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/dtensor/cc/tensor_layout.h"
namespace tensorflow {
namespace dtensor {
struct DTensorOperation {
const char* name;
const FunctionDef* function_def;
const Mesh default_mesh;
const StackTracesMap& stack_traces;
inline bool is_func() const { return function_def != nullptr; }
inline bool is_pure() const {
if (is_func()) {
return false;
}
const OpDef* op_def = nullptr;
Status status = OpRegistry::Global()->LookUpOpDef(name, &op_def);
DCHECK(status.ok());
if (!status.ok()) {
return false;
}
return !op_def->is_stateful();
}
};
}
}
#endif | #include "tensorflow/dtensor/cc/dtensor_operation.h"
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
namespace dtensor {
namespace {
REGISTER_OP("OutputResource").Output("resource: resource");
REGISTER_OP("InputResource").Input("resource: resource");
REGISTER_OP("Stateful").SetIsStateful();
REGISTER_OP("Pure");
TEST(DTensorOperationTest, TestEagerIsNotPure) {
DTensorOperation output{"OutputResource", nullptr, {}, {}};
DTensorOperation input{"InputResource", nullptr, {}, {}};
DTensorOperation stateful{"Stateful", nullptr, {}, {}};
DTensorOperation pure{"Pure", nullptr, {}, {}};
EXPECT_FALSE(output.is_pure());
EXPECT_FALSE(input.is_pure());
EXPECT_FALSE(stateful.is_pure());
EXPECT_TRUE(pure.is_pure());
}
TEST(DTensorOperationTest, TestFunctionIsNotPure) {
FunctionDef fdef;
DTensorOperation op{"func", &fdef, {}, {}};
EXPECT_FALSE(op.is_pure());
}
TEST(DTensorOperationTest, TestIsFunc) {
FunctionDef fdef;
DTensorOperation func_op{"func", &fdef, {}, {}};
DTensorOperation eager_op{"Pure", nullptr, {}, {}};
EXPECT_TRUE(func_op.is_func());
EXPECT_FALSE(eager_op.is_func());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/cc/dtensor_operation.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/tests/dtensor_operation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c9a4925f-1dc5-4ac0-9aa6-484ddcff8724 | cpp | tensorflow/tensorflow | composable_splitter | tensorflow/tools/proto_splitter/cc/composable_splitter.h | tensorflow/tools/proto_splitter/cc/composable_splitter_test.cc | #ifndef TENSORFLOW_TOOLS_PROTO_SPLITTER_CC_COMPOSABLE_SPLITTER_H_
#define TENSORFLOW_TOOLS_PROTO_SPLITTER_CC_COMPOSABLE_SPLITTER_H_
#include <vector>
#include "tensorflow/tools/proto_splitter/cc/composable_splitter_base.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace tools::proto_splitter {
class ComposableSplitter : public ComposableSplitterBase {
public:
explicit ComposableSplitter(tsl::protobuf::Message* message)
: ComposableSplitterBase(message), message_(message) {}
explicit ComposableSplitter(tsl::protobuf::Message* message,
ComposableSplitterBase* parent_splitter,
std::vector<FieldType>* fields_in_parent)
: ComposableSplitterBase(message, parent_splitter, fields_in_parent),
message_(message) {}
protected:
tsl::protobuf::Message* message() { return message_; }
private:
tsl::protobuf::Message* message_;
};
}
}
#endif | #include "tensorflow/tools/proto_splitter/cc/composable_splitter.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "riegeli/base/maker.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/string_reader.h"
#include "riegeli/records/record_reader.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/tools/proto_splitter/cc/test_util.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#define IS_OSS true
namespace tensorflow {
namespace tools::proto_splitter {
namespace {
using ::tensorflow::proto_splitter::ChunkedMessage;
using ::tensorflow::proto_splitter::ChunkMetadata;
using ::tensorflow::proto_splitter_testdata::RepeatedRepeatedString;
using ::tensorflow::proto_splitter_testdata::RepeatedString;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using tsl::testing::StatusIs;
using namespace std::string_literals;
class RepeatedStringSplitter : public ComposableSplitter {
friend class ComposableSplitter;
public:
using ComposableSplitter::ComposableSplitter;
absl::Status BuildChunks() override {
RepeatedString* repeated_string =
tsl::protobuf::DynamicCastToGenerated<RepeatedString>(message());
auto strings = repeated_string->strings();
if (strings.empty()) {
TF_RETURN_IF_ERROR(SetMessageAsBaseChunk());
return absl::OkStatus();
}
for (int i = 0; i < strings.size(); i++) {
auto s = std::make_unique<MessageBytes>(strings[i]);
std::vector<FieldType> fields = {"strings"s, i};
TF_RETURN_IF_ERROR(AddChunk(std::move(s), &fields));
}
return absl::OkStatus();
}
};
RepeatedString SetUpRepeatedString(std::vector<string> strings) {
RepeatedString message;
*message.mutable_strings() = {strings.begin(), strings.end()};
return message;
}
TEST(RepeatedStringSplitterTest, TestSplitChunks) {
std::vector<string> strings = {"piece-1", "piece-2", "piece-3"};
auto message = SetUpRepeatedString(strings);
RepeatedStringSplitter splitter = RepeatedStringSplitter(&message);
TF_ASSERT_OK_AND_ASSIGN(auto ret, splitter.Split());
std::vector<MessageBytes>* chunks = ret.chunks;
ASSERT_NE(chunks, nullptr);
ChunkedMessage* chunked_message = ret.chunked_message;
ASSERT_NE(chunked_message, nullptr);
for (int i = 0; i < chunks->size(); i++) {
MessageBytes chunk = (*chunks)[i];
EXPECT_THAT(chunk, ::testing::VariantWith<std::string>(strings[i]));
}
EXPECT_THAT(*chunked_message, EqualsProto(R"pb(chunked_fields {
field_tag { field: 1 }
field_tag { index: 0 }
message { chunk_index: 0 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 1 }
message { chunk_index: 1 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 2 }
message { chunk_index: 2 }
})pb"));
TF_ASSERT_OK_AND_ASSIGN(auto ret2, splitter.Split());
std::vector<MessageBytes>* chunks2 = ret2.chunks;
ChunkedMessage* chunked_message2 = ret2.chunked_message;
EXPECT_EQ(chunks2, chunks);
EXPECT_EQ(chunked_message2, chunked_message);
}
static void CheckChunks(riegeli::RecordReaderBase& reader,
std::vector<string>& strings) {
ChunkMetadata chunk_metadata;
reader.Seek(reader.Size().value());
reader.SeekBack();
reader.ReadRecord(chunk_metadata);
auto& chunk_info = chunk_metadata.chunks();
EXPECT_EQ(chunk_info.size(), strings.size());
for (int i = 0; i < chunk_info.size(); i++) {
reader.Seek(chunk_info[i].offset());
absl::string_view chunk;
reader.ReadRecord(chunk);
EXPECT_EQ(strings[i], std::string(chunk));
}
EXPECT_THAT(chunk_metadata.message(),
EqualsProto(R"pb(chunked_fields {
field_tag { field: 1 }
field_tag { index: 0 }
message { chunk_index: 0 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 1 }
message { chunk_index: 1 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 2 }
message { chunk_index: 2 }
})pb"));
}
TEST(RepeatedStringSplitterTest, TestWrite) {
std::vector<string> strings = {"piece-1", "piece-2", "piece-3"};
auto message = SetUpRepeatedString(strings);
RepeatedStringSplitter splitter = RepeatedStringSplitter(&message);
std::string output_prefix = tensorflow::io::GetTempFilename("");
TF_ASSERT_OK(splitter.Write(output_prefix));
std::string expected_file = absl::StrCat(output_prefix, ".cpb");
TF_ASSERT_OK_AND_ASSIGN(auto exists,
internal::FileExists(Env::Default(), expected_file));
EXPECT_TRUE(exists);
riegeli::RecordReader file_reader(
riegeli::Maker<riegeli::FdReader>(std::move(expected_file)));
CheckChunks(file_reader, strings);
}
TEST(RepeatedStringSplitterTest, TestWriteToString) {
std::vector<string> strings = {"piece-1", "piece-2", "piece-3"};
auto message = SetUpRepeatedString(strings);
RepeatedStringSplitter splitter = RepeatedStringSplitter(&message);
auto string_output_results = splitter.WriteToString();
TF_EXPECT_OK(string_output_results.status());
std::string string_output = std::get<0>(string_output_results.value());
bool is_chunked = std::get<1>(string_output_results.value());
EXPECT_TRUE(is_chunked);
riegeli::RecordReader string_reader(
riegeli::Maker<riegeli::StringReader>(string_output));
CheckChunks(string_reader, strings);
}
#if !IS_OSS
TEST(RepeatedStringSplitterTest, TestWriteToCord) {
std::vector<string> strings = {"piece-1", "piece-2", "piece-3"};
auto message = SetUpRepeatedString(strings);
RepeatedStringSplitter splitter = RepeatedStringSplitter(&message);
auto cord_output_results = splitter.WriteToCord();
TF_EXPECT_OK(cord_output_results.status());
absl::Cord cord_output = std::get<0>(cord_output_results.value());
bool is_chunked = std::get<1>(cord_output_results.value());
EXPECT_TRUE(is_chunked);
riegeli::RecordReader cord_reader(
riegeli::Maker<riegeli::CordReader>(&cord_output));
CheckChunks(cord_reader, strings);
}
#endif
TEST(RepeatedStringSplitterTest, TestNoSplit) {
RepeatedString message;
RepeatedStringSplitter splitter = RepeatedStringSplitter(&message);
TF_ASSERT_OK_AND_ASSIGN(auto ret, splitter.Split());
std::vector<MessageBytes>* chunks = ret.chunks;
ASSERT_NE(chunks, nullptr);
ChunkedMessage* chunked_message = ret.chunked_message;
ASSERT_NE(chunked_message, nullptr);
EXPECT_THAT(*chunks, SizeIs(1));
EXPECT_THAT(*std::get<tsl::protobuf::Message*>((*chunks)[0]),
EqualsProto(""));
EXPECT_THAT(*chunked_message, EqualsProto(R"pb(chunk_index: 0)pb"));
}
class RepeatedRepeatedStringSplitter : public ComposableSplitter {
public:
using ComposableSplitter::ComposableSplitter;
absl::Status BuildChunks() override {
TF_RETURN_IF_ERROR(SetMessageAsBaseChunk());
RepeatedRepeatedString* msg =
tsl::protobuf::DynamicCastToGenerated<RepeatedRepeatedString>(
message());
auto repeated_strings = msg->rs();
for (int i = 0; i < repeated_strings.size(); i++) {
std::vector<FieldType> fields = {"rs"s, i};
auto splitter =
RepeatedStringSplitter(&repeated_strings[i], this, &fields);
TF_RETURN_IF_ERROR(splitter.BuildChunks());
}
return absl::OkStatus();
}
};
TEST(ComposableTest, RepeatedRepeatedStringTest) {
std::vector<string> strings1 = {"piece-1", "piece-2", "piece-3"};
auto rs1 = SetUpRepeatedString(strings1);
std::vector<string> strings2 = {"new-strings-1"};
auto rs2 = SetUpRepeatedString(strings2);
std::vector<string> strings3 = {"foo-1", "foo-2"};
auto rs3 = SetUpRepeatedString(strings3);
std::vector<RepeatedString> rs = {rs1, rs2, rs3};
RepeatedRepeatedString message;
message.mutable_rs()->Add(rs.begin(), rs.end());
RepeatedRepeatedStringSplitter splitter =
RepeatedRepeatedStringSplitter(&message);
TF_ASSERT_OK_AND_ASSIGN(auto ret, splitter.Split());
std::vector<MessageBytes>* chunks = ret.chunks;
ASSERT_NE(chunks, nullptr);
ChunkedMessage* chunked_message = ret.chunked_message;
ASSERT_NE(chunked_message, nullptr);
std::vector<string> expected_chunks = {"piece-1", "piece-2", "piece-3",
"new-strings-1", "foo-1", "foo-2"};
EXPECT_THAT(*chunks, SizeIs(7));
EXPECT_THAT(*std::get<tsl::protobuf::Message*>((*chunks)[0]),
EqualsProto(message));
for (int i = 1; i < chunks->size(); i++) {
MessageBytes chunk = (*chunks)[i];
EXPECT_THAT(chunk,
::testing::VariantWith<std::string>(expected_chunks[i - 1]));
}
EXPECT_THAT(chunked_message->chunked_fields()[4],
EqualsProto(R"pb(field_tag { field: 2 }
field_tag { index: 2 }
field_tag { field: 1 }
field_tag { index: 0 }
message { chunk_index: 5 })pb"));
}
TEST(ComposableTest, ChildSplitterTest) {
std::vector<string> strings1 = {"piece-1", "piece-2", "piece-3"};
auto message1 = SetUpRepeatedString(strings1);
RepeatedStringSplitter splitter(&message1);
std::vector<FieldType> fields = {};
std::vector<string> strings2 = {"s1", "s2"};
auto message2 = SetUpRepeatedString(strings2);
RepeatedStringSplitter child(&message2, &splitter, &fields);
TF_EXPECT_OK(child.BuildChunks());
TF_ASSERT_OK_AND_ASSIGN(auto ret, splitter.Split());
std::vector<MessageBytes>* chunks = ret.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_THAT(*chunks, SizeIs(5));
}
TEST(ComposableTest, ChildSplitterUnimplementedTest) {
RepeatedString message;
RepeatedStringSplitter splitter(&message);
std::vector<FieldType> fields = {};
RepeatedStringSplitter child(&message, &splitter, &fields);
EXPECT_THAT(child.Split(), StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("`Split` function behavior")));
EXPECT_THAT(child.Write("str"),
StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("`Write` function behavior")));
}
class NoOpSplitter : public ComposableSplitter {
public:
using ComposableSplitter::ComposableSplitter;
absl::Status BuildChunks() override { return absl::OkStatus(); }
};
TEST(NoOpSplitterTest, TestWrite) {
std::vector<string> strings = {"piece-1", "piece-2", "piece-3"};
auto message = SetUpRepeatedString(strings);
NoOpSplitter splitter(&message);
std::string output_prefix = tensorflow::io::GetTempFilename("");
TF_ASSERT_OK(splitter.Write(output_prefix));
std::string expected_file = absl::StrCat(output_prefix, ".pb");
TF_ASSERT_OK_AND_ASSIGN(auto exists,
internal::FileExists(Env::Default(), expected_file));
EXPECT_TRUE(exists);
RepeatedString read_message;
auto status = tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
expected_file, &read_message);
EXPECT_THAT(read_message, EqualsProto(message));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/cc/composable_splitter.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/cc/composable_splitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9ed3e975-ab7f-4ee0-99bc-cac0be8541df | cpp | tensorflow/tensorflow | eigen_backward_cuboid_convolutions | tensorflow/core/kernels/eigen_backward_cuboid_convolutions.h | tensorflow/core/kernels/eigen_backward_cuboid_convolutions_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_EIGEN_BACKWARD_CUBOID_CONVOLUTIONS_H_
#define TENSORFLOW_CORE_KERNELS_EIGEN_BACKWARD_CUBOID_CONVOLUTIONS_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/kernels/eigen_cuboid_convolution.h"
namespace Eigen {
template <typename OutputBackward, typename Kernel>
EIGEN_ALWAYS_INLINE static const std::conditional_t<
internal::traits<OutputBackward>::Layout == ColMajor,
TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
internal::traits<OutputBackward>::NumDimensions>,
const TensorContractionOp<
const array<
IndexPair<typename internal::traits<OutputBackward>::Index>, 1>,
const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
2>,
const TensorShufflingOp<
const array<
typename internal::traits<OutputBackward>::Index, 5>,
const TensorReverseOp<const Eigen::array<bool, 5>,
const Kernel>>>>,
const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
2>,
const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic,
const OutputBackward>>>>,
TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
internal::traits<OutputBackward>::NumDimensions>,
const TensorContractionOp<
const array<
IndexPair<typename internal::traits<OutputBackward>::Index>, 1>,
const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
2>,
const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic,
const OutputBackward>>,
const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
2>,
const TensorShufflingOp<
const array<
typename internal::traits<OutputBackward>::Index, 5>,
const TensorReverseOp<const Eigen::array<bool, 5>,
const Kernel>>>>>>>
CuboidConvolutionBackwardInput(
const Kernel& kernel, const OutputBackward& output_backward,
typename internal::traits<OutputBackward>::Index inputPlanes,
typename internal::traits<OutputBackward>::Index inputRows,
typename internal::traits<OutputBackward>::Index inputCols,
const DenseIndex plane_stride = 1, const DenseIndex row_stride = 1,
const DenseIndex col_stride = 1) {
typedef typename internal::traits<OutputBackward>::Index TensorIndex;
const TensorRef<const Tensor<typename internal::traits<Kernel>::Scalar,
internal::traits<Kernel>::NumDimensions,
internal::traits<Kernel>::Layout, TensorIndex>>
kern(kernel);
const TensorRef<
const Tensor<typename internal::traits<OutputBackward>::Scalar,
internal::traits<OutputBackward>::NumDimensions,
internal::traits<OutputBackward>::Layout, TensorIndex>>
out(output_backward);
EIGEN_STATIC_ASSERT(internal::traits<Kernel>::Layout ==
internal::traits<OutputBackward>::Layout,
YOU_MADE_A_PROGRAMMING_MISTAKE);
static const bool isColMajor =
(internal::traits<OutputBackward>::Layout == ColMajor);
static const int NumDims = internal::traits<OutputBackward>::NumDimensions;
const TensorIndex kernelFilters =
isColMajor ? kern.dimensions()[0] : kern.dimensions()[4];
const TensorIndex kernelChannels =
isColMajor ? kern.dimensions()[1] : kern.dimensions()[3];
const TensorIndex kernelPlanes =
isColMajor ? kern.dimensions()[2] : kern.dimensions()[2];
const TensorIndex kernelRows =
isColMajor ? kern.dimensions()[3] : kern.dimensions()[1];
const TensorIndex kernelCols =
isColMajor ? kern.dimensions()[4] : kern.dimensions()[0];
const TensorIndex outputPlanes =
isColMajor ? out.dimensions()[1] : out.dimensions()[NumDims - 2];
const TensorIndex outputRows =
isColMajor ? out.dimensions()[2] : out.dimensions()[NumDims - 3];
const TensorIndex outputCols =
isColMajor ? out.dimensions()[3] : out.dimensions()[NumDims - 4];
const TensorIndex kernelPlanesEff = kernelPlanes;
const TensorIndex kernelRowsEff = kernelRows;
const TensorIndex kernelColsEff = kernelCols;
const TensorIndex forward_pad_top_z = numext::maxi<Index>(
0,
((outputPlanes - 1) * plane_stride + kernelPlanesEff - inputPlanes) / 2);
const TensorIndex forward_pad_top = numext::maxi<Index>(
0, ((outputRows - 1) * row_stride + kernelRowsEff - inputRows) / 2);
const TensorIndex forward_pad_left = numext::maxi<Index>(
0, ((outputCols - 1) * col_stride + kernelColsEff - inputCols) / 2);
const TensorIndex padding_top_z = kernelPlanesEff - 1 - forward_pad_top_z;
const TensorIndex padding_top = kernelRowsEff - 1 - forward_pad_top;
const TensorIndex padding_left = kernelColsEff - 1 - forward_pad_left;
const TensorIndex padding_bottom_z = inputPlanes -
(outputPlanes - 1) * plane_stride - 2 -
padding_top_z + kernelPlanesEff;
const TensorIndex padding_bottom = inputRows - (outputRows - 1) * row_stride -
2 - padding_top + kernelRowsEff;
const TensorIndex padding_right = inputCols - (outputCols - 1) * col_stride -
2 - padding_left + kernelColsEff;
eigen_assert(padding_top_z >= 0);
eigen_assert(padding_top >= 0);
eigen_assert(padding_left >= 0);
eigen_assert(padding_bottom_z >= 0);
eigen_assert(padding_bottom >= 0);
eigen_assert(padding_right >= 0);
Eigen::array<bool, 5> kernel_reverse;
if (isColMajor) {
kernel_reverse[0] = false;
kernel_reverse[1] = false;
kernel_reverse[2] = true;
kernel_reverse[3] = true;
kernel_reverse[4] = true;
} else {
kernel_reverse[0] = true;
kernel_reverse[1] = true;
kernel_reverse[2] = true;
kernel_reverse[3] = false;
kernel_reverse[4] = false;
}
array<TensorIndex, 5> kernel_shuffle;
if (isColMajor) {
kernel_shuffle[0] = 0;
kernel_shuffle[1] = 2;
kernel_shuffle[2] = 3;
kernel_shuffle[3] = 4;
kernel_shuffle[4] = 1;
} else {
kernel_shuffle[0] = 3;
kernel_shuffle[1] = 0;
kernel_shuffle[2] = 1;
kernel_shuffle[3] = 2;
kernel_shuffle[4] = 4;
}
DSizes<TensorIndex, 2> kernel_dims;
if (isColMajor) {
kernel_dims[0] = kernelFilters * kernelPlanes * kernelRows * kernelCols;
kernel_dims[1] = kernelChannels;
} else {
kernel_dims[1] = kernelFilters * kernelPlanes * kernelRows * kernelCols;
kernel_dims[0] = kernelChannels;
}
DSizes<TensorIndex, 2> pre_contract_dims;
if (isColMajor) {
pre_contract_dims[0] =
kernelFilters * kernelPlanes * kernelRows * kernelCols;
pre_contract_dims[1] = inputPlanes * inputRows * inputCols;
for (int i = 4; i < NumDims; ++i) {
pre_contract_dims[1] *= out.dimension(i);
}
} else {
pre_contract_dims[1] =
kernelFilters * kernelPlanes * kernelRows * kernelCols;
pre_contract_dims[0] = inputPlanes * inputRows * inputCols;
for (int i = 0; i < NumDims - 4; ++i) {
pre_contract_dims[0] *= out.dimension(i);
}
}
array<IndexPair<TensorIndex>, 1> contract_dims;
if (isColMajor) {
contract_dims[0] = IndexPair<TensorIndex>(0, 0);
} else {
contract_dims[0] = IndexPair<TensorIndex>(1, 1);
}
DSizes<TensorIndex, NumDims> post_contract_dims;
if (isColMajor) {
post_contract_dims[0] = kernelChannels;
post_contract_dims[1] = inputPlanes;
post_contract_dims[2] = inputRows;
post_contract_dims[3] = inputCols;
for (int i = 4; i < NumDims; ++i) {
post_contract_dims[i] = out.dimension(i);
}
} else {
post_contract_dims[NumDims - 1] = kernelChannels;
post_contract_dims[NumDims - 2] = inputPlanes;
post_contract_dims[NumDims - 3] = inputRows;
post_contract_dims[NumDims - 4] = inputCols;
for (int i = 0; i < NumDims - 4; ++i) {
post_contract_dims[i] = out.dimension(i);
}
}
return choose(
Cond<internal::traits<OutputBackward>::Layout == ColMajor>(),
kernel.reverse(kernel_reverse)
.shuffle(kernel_shuffle)
.reshape(kernel_dims)
.eval()
.contract(output_backward
.extract_volume_patches(
kernelPlanes, kernelRows, kernelCols, 1, 1, 1,
plane_stride, row_stride, col_stride, padding_top_z,
padding_bottom_z, padding_top, padding_bottom,
padding_left, padding_right)
.reshape(pre_contract_dims),
contract_dims)
.reshape(post_contract_dims),
output_backward
.extract_volume_patches(kernelPlanes, kernelRows, kernelCols, 1, 1, 1,
plane_stride, row_stride, col_stride,
padding_top_z, padding_bottom_z, padding_top,
padding_bottom, padding_left, padding_right)
.reshape(pre_contract_dims)
.contract(kernel.reverse(kernel_reverse)
.shuffle(kernel_shuffle)
.reshape(kernel_dims)
.eval(),
contract_dims)
.reshape(post_contract_dims));
}
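// Illustrative usage sketch (shapes are hypothetical; ColMajor layout assumed).
// With a kernel of shape [filters, channels, planes, rows, cols] and an output
// gradient of shape [filters, out_planes, out_rows, out_cols, batch], the
// result is the gradient w.r.t. the input, of shape
// [channels, in_planes, in_rows, in_cols, batch]:
//
//   Eigen::Tensor<float, 5> kernel(8, 3, 2, 3, 3);
//   Eigen::Tensor<float, 5> out_grad(8, 4, 5, 5, 16);
//   Eigen::Tensor<float, 5> in_grad =
//       Eigen::CuboidConvolutionBackwardInput(
//           kernel, out_grad, /*inputPlanes=*/5, /*inputRows=*/7,
//           /*inputCols=*/7);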
template <typename OutputBackward, typename Input>
EIGEN_ALWAYS_INLINE static const std::conditional_t<
internal::traits<Input>::Layout == ColMajor,
const TensorReverseOp<
const Eigen::array<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Eigen::TensorShufflingOp<
const Eigen::array<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Eigen::TensorReshapingOp<
const Eigen::DSizes<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const TensorContractionOp<
const array<
IndexPair<typename internal::traits<Input>::Index>, 1>,
const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
const DSizes<typename internal::traits<Input>::Index,
2>,
const Eigen::TensorShufflingOp<
const Eigen::array<
typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const OutputBackward>>>,
const TensorReshapingOp<
const DSizes<typename internal::traits<Input>::Index,
2>,
const TensorVolumePatchOp<
Dynamic, Dynamic, Dynamic,
const Eigen::TensorForcedEvalOp<
const Eigen::TensorShufflingOp<
const Eigen::array<
typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Input>>>>>>>>,
const TensorReverseOp<
const Eigen::array<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Eigen::TensorShufflingOp<
const Eigen::array<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Eigen::TensorReshapingOp<
const Eigen::DSizes<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const TensorContractionOp<
const array<
IndexPair<typename internal::traits<Input>::Index>, 1>,
const TensorReshapingOp<
const DSizes<typename internal::traits<Input>::Index,
2>,
const TensorVolumePatchOp<
Dynamic, Dynamic, Dynamic,
const Eigen::TensorForcedEvalOp<
const Eigen::TensorShufflingOp<
const Eigen::array<
typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Input>>>>,
const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
const DSizes<typename internal::traits<Input>::Index,
2>,
const Eigen::TensorShufflingOp<
const Eigen::array<
typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const OutputBackward>>>>>>>>
CuboidConvolutionBackwardKernel(
const Input& input, const OutputBackward& output_backward,
typename internal::traits<Input>::Index kernelPlanes,
typename internal::traits<Input>::Index kernelRows,
typename internal::traits<Input>::Index kernelCols,
const DenseIndex stridePlanes = 1, const DenseIndex strideRows = 1,
const DenseIndex strideCols = 1) {
typedef typename internal::traits<Input>::Index TensorIndex;
TensorRef<Tensor<typename internal::traits<Input>::Scalar,
internal::traits<Input>::NumDimensions,
internal::traits<Input>::Layout, TensorIndex>>
in(input);
TensorRef<Tensor<typename internal::traits<OutputBackward>::Scalar,
internal::traits<OutputBackward>::NumDimensions,
internal::traits<OutputBackward>::Layout, TensorIndex>>
out(output_backward);
EIGEN_STATIC_ASSERT(internal::traits<Input>::Layout ==
internal::traits<OutputBackward>::Layout,
YOU_MADE_A_PROGRAMMING_MISTAKE);
static const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
static const int NumDims = internal::traits<Input>::NumDimensions;
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions ==
internal::traits<OutputBackward>::NumDimensions,
YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 5,
YOU_MADE_A_PROGRAMMING_MISTAKE);
const TensorIndex inputPlanes =
isColMajor ? in.dimension(1) : in.dimension(NumDims - 2);
const TensorIndex inputRows =
isColMajor ? in.dimension(2) : in.dimension(NumDims - 3);
const TensorIndex inputCols =
isColMajor ? in.dimension(3) : in.dimension(NumDims - 4);
const TensorIndex outputPlanes =
isColMajor ? out.dimension(1) : out.dimension(NumDims - 2);
const TensorIndex outputRows =
isColMajor ? out.dimension(2) : out.dimension(NumDims - 3);
const TensorIndex outputCols =
isColMajor ? out.dimension(3) : out.dimension(NumDims - 4);
const TensorIndex kernelFilters =
isColMajor ? out.dimension(0) : out.dimension(NumDims - 1);
const TensorIndex kernelChannels =
isColMajor ? in.dimension(0) : in.dimension(NumDims - 1);
const TensorIndex batch =
isColMajor ? in.dimension(4) : in.dimension(NumDims - 5);
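  // This overload takes no dilation arguments, so the effective kernel
  // extents are simply the nominal ones.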
const TensorIndex kernelPlanesEff = kernelPlanes;
const TensorIndex kernelRowsEff = kernelRows;
const TensorIndex kernelColsEff = kernelCols;
const TensorIndex padPlanes = numext::maxi<Index>(
0, (outputPlanes - 1) * stridePlanes + kernelPlanesEff - inputPlanes);
const TensorIndex padRows = numext::maxi<Index>(
0, (outputRows - 1) * strideRows + kernelRowsEff - inputRows);
const TensorIndex padCols = numext::maxi<Index>(
0, (outputCols - 1) * strideCols + kernelColsEff - inputCols);
const TensorIndex padding_top_z = padPlanes / 2;
const TensorIndex padding_top = padRows / 2;
const TensorIndex padding_left = padCols / 2;
const auto expanded_out_planes = (outputPlanes - 1) * stridePlanes + 1;
const auto expanded_out_rows = (outputRows - 1) * strideRows + 1;
const auto expanded_out_cols = (outputCols - 1) * strideCols + 1;
const auto padded_out_planes = inputPlanes + kernelPlanes - 1;
const auto padded_out_rows = inputRows + kernelRows - 1;
const auto padded_out_cols = inputCols + kernelCols - 1;
const auto top_pad_planes = kernelPlanes - 1 - padding_top_z;
const auto top_pad_rows = kernelRows - 1 - padding_top;
const auto left_pad_cols = kernelCols - 1 - padding_left;
const auto bottom_pad_planes =
padded_out_planes - expanded_out_planes - top_pad_planes;
const auto bottom_pad_rows =
padded_out_rows - expanded_out_rows - top_pad_rows;
const auto right_pad_cols =
padded_out_cols - expanded_out_cols - left_pad_cols;
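  // Swap the batch and filter dimensions of the output gradient before patch
  // extraction; the index permutation happens to be the same for both layouts.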
array<TensorIndex, 5> output_backward_shuffle;
if (isColMajor) {
output_backward_shuffle = {4, 1, 2, 3, 0};
} else {
output_backward_shuffle = {4, 1, 2, 3, 0};
}
array<TensorIndex, 5> input_shuffle;
if (isColMajor) {
input_shuffle = {0, 4, 1, 2, 3};
} else {
input_shuffle = {1, 2, 3, 0, 4};
}
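  // Flatten the shuffled input into a 2-D matrix: the channel dimension on
  // one side, batch and spatial positions on the other, ready to act as the
  // "kernel" operand of the contraction.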
DSizes<TensorIndex, 2> input_dims;
if (isColMajor) {
input_dims[0] = kernelChannels;
input_dims[1] = batch * inputPlanes * inputRows * inputCols;
} else {
input_dims[1] = kernelChannels;
input_dims[0] = inputCols * inputRows * inputPlanes * batch;
}
DSizes<TensorIndex, 2> pre_contract_dims;
if (isColMajor) {
pre_contract_dims[0] = batch * inputPlanes * inputRows * inputCols;
pre_contract_dims[1] =
kernelPlanes * kernelRows * kernelCols * kernelFilters;
} else {
pre_contract_dims[1] = inputCols * inputRows * inputPlanes * batch;
pre_contract_dims[0] =
kernelFilters * kernelCols * kernelRows * kernelPlanes;
}
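  // Flatten the volume patches extracted from the output gradient: batch and
  // input positions on one side, kernel taps times filters on the other.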
array<IndexPair<TensorIndex>, 1> contract_dims;
contract_dims[0] = IndexPair<TensorIndex>(1, 0);
DSizes<TensorIndex, NumDims> post_contract_dims;
if (isColMajor) {
post_contract_dims[0] = kernelChannels;
post_contract_dims[1] = kernelPlanes;
post_contract_dims[2] = kernelRows;
post_contract_dims[3] = kernelCols;
post_contract_dims[4] = kernelFilters;
} else {
post_contract_dims[0] = kernelFilters;
post_contract_dims[1] = kernelCols;
post_contract_dims[2] = kernelRows;
post_contract_dims[3] = kernelPlanes;
post_contract_dims[4] = kernelChannels;
}
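  // The contraction result is reshaped to 5-D and then shuffled and reversed
  // into the final kernel-gradient layout (filters, channels, planes, rows,
  // cols for ColMajor).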
array<TensorIndex, 5> kernel_shuffle;
if (isColMajor) {
kernel_shuffle = {4, 0, 1, 2, 3};
} else {
kernel_shuffle = {1, 2, 3, 4, 0};
}
array<TensorIndex, 5> kernel_reverse;
if (isColMajor) {
kernel_reverse = {false, false, true, true, true};
} else {
kernel_reverse = {true, true, true, false, false};
}
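  // Roles are swapped relative to the forward pass: the shuffled output
  // gradient is the tensor from which volume patches are extracted, while the
  // flattened forward input plays the part of the kernel in the contraction.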
const auto the_input =
output_backward.shuffle(output_backward_shuffle).eval();
const auto the_kernel =
input.shuffle(input_shuffle).reshape(input_dims).eval();
return choose(Cond<internal::traits<Input>::Layout == ColMajor>(),
the_kernel.contract(
the_input
.extract_volume_patches(
inputPlanes, inputRows, inputCols, 1, 1, 1,
stridePlanes, strideRows, strideCols,
top_pad_planes, bottom_pad_planes, top_pad_rows,
bottom_pad_rows, left_pad_cols, right_pad_cols)
.reshape(pre_contract_dims),
contract_dims),
the_input
.extract_volume_patches(
inputPlanes, inputRows, inputCols, 1, 1, 1,
stridePlanes, strideRows, strideCols, top_pad_planes,
bottom_pad_planes, top_pad_rows, bottom_pad_rows,
left_pad_cols, right_pad_cols)
.reshape(pre_contract_dims)
.contract(the_kernel, contract_dims))
.reshape(post_contract_dims)
.shuffle(kernel_shuffle)
.reverse(kernel_reverse);
}
}
#endif | #include "tensorflow/core/kernels/eigen_backward_cuboid_convolutions.h"
#include "tensorflow/core/platform/test.h"
namespace Eigen {
namespace {
void EigenApprox(float a, float b) {
ASSERT_TRUE(std::abs(a - b) <= std::min(std::abs(a), std::abs(b)) * 1e-3);
}
static int ceil_div(int a, int b) { return (a + b - 1) / b; }
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_cuboid_convolution_backward_input_valid) {
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int patch_rows = 2;
const int patch_cols = 2;
const int patch_planes = 2;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
const int output_depth = 5;
Tensor<float, 4> input_backward(input_depth, input_planes, input_rows,
input_cols);
Tensor<float, 5> kernel(output_depth, input_depth, patch_planes, patch_rows,
patch_cols);
Tensor<float, 4> output_backward(output_depth, output_planes, output_rows,
output_cols);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols);
EXPECT_EQ(input_backward.dimension(3), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(1), input_planes);
EXPECT_EQ(input_backward.dimension(0), input_depth);
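  // Brute-force reference: each input position accumulates
  // output_backward * kernel over all kernel taps whose corresponding output
  // position lies in range.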
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
output_backward(od, output_i, output_j, output_k) *
kernel(od, id, p, r, c);
}
}
}
}
}
EigenApprox(input_backward(id, i, j, k), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_cuboid_convolution_backward_input_valid_row_major) {
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int patch_rows = 2;
const int patch_cols = 2;
const int patch_planes = 2;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
const int output_depth = 5;
Tensor<float, 4, RowMajor> input_backward(input_cols, input_rows,
input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel(patch_cols, patch_rows, patch_planes,
input_depth, output_depth);
Tensor<float, 4, RowMajor> output_backward(output_cols, output_rows,
output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols);
EXPECT_EQ(input_backward.dimension(0), input_cols);
EXPECT_EQ(input_backward.dimension(1), input_rows);
EXPECT_EQ(input_backward.dimension(2), input_planes);
EXPECT_EQ(input_backward.dimension(3), input_depth);
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
output_backward(output_k, output_j, output_i, od) *
kernel(c, r, p, id, od);
}
}
}
}
}
EigenApprox(input_backward(k, j, i, id), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_cuboid_convolution_backward_input_same) {
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int patch_rows = 3;
const int patch_cols = 2;
const int patch_planes = 4;
const int output_rows = input_rows;
const int output_cols = input_cols;
const int output_planes = input_planes;
const int output_depth = 5;
Tensor<float, 4> input_backward(input_depth, input_planes, input_rows,
input_cols);
Tensor<float, 5> kernel(output_depth, input_depth, patch_planes, patch_rows,
patch_cols);
Tensor<float, 4> output_backward(output_depth, output_planes, output_rows,
output_cols);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols);
EXPECT_EQ(input_backward.dimension(3), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(1), input_planes);
EXPECT_EQ(input_backward.dimension(0), input_depth);
const int dz = patch_planes - 1;
const int dy = patch_rows - 1;
const int dx = patch_cols - 1;
const int forward_pad_x = dx / 2;
const int forward_pad_y = dy / 2;
const int forward_pad_z = dz / 2;
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - p + forward_pad_z;
int output_j = j - r + forward_pad_y;
int output_k = k - c + forward_pad_x;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
output_backward(od, output_i, output_j, output_k) *
kernel(od, id, p, r, c);
}
}
}
}
}
EigenApprox(input_backward(id, i, j, k), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_cuboid_convolution_backward_input_same_row_major) {
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int patch_rows = 2;
const int patch_cols = 3;
const int patch_planes = 4;
const int output_rows = input_rows;
const int output_cols = input_cols;
const int output_planes = input_planes;
const int output_depth = 5;
Tensor<float, 4, RowMajor> input_backward(input_cols, input_rows,
input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel(patch_cols, patch_rows, patch_planes,
input_depth, output_depth);
Tensor<float, 4, RowMajor> output_backward(output_cols, output_rows,
output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols);
EXPECT_EQ(input_backward.dimension(0), input_cols);
EXPECT_EQ(input_backward.dimension(1), input_rows);
EXPECT_EQ(input_backward.dimension(2), input_planes);
EXPECT_EQ(input_backward.dimension(3), input_depth);
const int dz = patch_planes - 1;
const int dy = patch_rows - 1;
const int dx = patch_cols - 1;
const int forward_pad_x = dx / 2;
const int forward_pad_y = dy / 2;
const int forward_pad_z = dz / 2;
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - p + forward_pad_z;
int output_j = j - r + forward_pad_y;
int output_k = k - c + forward_pad_x;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
output_backward(output_k, output_j, output_i, od) *
kernel(c, r, p, id, od);
}
}
}
}
}
EigenApprox(input_backward(k, j, i, id), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_cuboid_convolution_backward_input_valid) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int patch_rows = 2;
const int patch_cols = 2;
const int patch_planes = 2;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
const int output_depth = 5;
Tensor<float, 5> input_backward(input_depth, input_planes, input_rows,
input_cols, num_batches);
Tensor<float, 5> kernel(output_depth, input_depth, patch_planes, patch_rows,
patch_cols);
Tensor<float, 5> output_backward(output_depth, output_planes, output_rows,
output_cols, num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols);
EXPECT_EQ(input_backward.dimension(4), num_batches);
EXPECT_EQ(input_backward.dimension(3), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(1), input_planes);
EXPECT_EQ(input_backward.dimension(0), input_depth);
for (int b = 0; b < num_batches; ++b) {
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - p;
int output_j = j - r;
int output_k = k - c;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
output_backward(od, output_i, output_j, output_k, b) *
kernel(od, id, p, r, c);
}
}
}
}
}
EigenApprox(input_backward(id, i, j, k, b), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_cuboid_convolution_backward_input_valid_row_major) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int patch_rows = 2;
const int patch_cols = 2;
const int patch_planes = 2;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
const int output_depth = 5;
Tensor<float, 5, RowMajor> input_backward(num_batches, input_cols, input_rows,
input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel(patch_cols, patch_rows, patch_planes,
input_depth, output_depth);
Tensor<float, 5, RowMajor> output_backward(
num_batches, output_cols, output_rows, output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols);
EXPECT_EQ(input_backward.dimension(0), num_batches);
EXPECT_EQ(input_backward.dimension(1), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(3), input_planes);
EXPECT_EQ(input_backward.dimension(4), input_depth);
for (int b = 0; b < num_batches; ++b) {
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - p;
int output_j = j - r;
int output_k = k - c;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
output_backward(b, output_k, output_j, output_i, od) *
kernel(c, r, p, id, od);
}
}
}
}
}
EigenApprox(input_backward(b, k, j, i, id), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_cuboid_convolution_backward_kernel_valid) {
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int output_depth = 5;
const int patch_rows = 2;
const int patch_cols = 2;
const int patch_planes = 3;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
Tensor<float, 5> input(input_depth, input_planes, input_rows, input_cols,
1);
Tensor<float, 5> kernel(output_depth, input_depth, patch_planes, patch_rows,
patch_cols);
Tensor<float, 5> output_backward(output_depth, output_planes, output_rows,
output_cols, 1);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel.setRandom();
kernel = CuboidConvolutionBackwardKernel(input, output_backward, patch_planes,
patch_rows, patch_cols, 1, 1, 1);
EXPECT_EQ(kernel.dimension(0), output_depth);
EXPECT_EQ(kernel.dimension(1), input_depth);
EXPECT_EQ(kernel.dimension(2), patch_planes);
EXPECT_EQ(kernel.dimension(3), patch_rows);
EXPECT_EQ(kernel.dimension(4), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
float expected = 0.0f;
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected += input(id, i, j, k, 0) *
output_backward(od, output_i, output_j,
output_k, 0);
}
}
}
}
EigenApprox(kernel(od, id, p, r, c), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_cuboid_convolution_backward_kernel_valid_row_major) {
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int output_depth = 5;
const int patch_rows = 2;
const int patch_cols = 2;
const int patch_planes = 3;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
  Tensor<float, 5, RowMajor> input(1, input_cols, input_rows,
                                   input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel(patch_cols, patch_rows, patch_planes,
input_depth, output_depth);
Tensor<float, 5, RowMajor> output_backward(
1, output_cols, output_rows, output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel.setRandom();
kernel = CuboidConvolutionBackwardKernel(input, output_backward, patch_planes,
patch_rows, patch_cols, 1, 1, 1);
EXPECT_EQ(kernel.dimension(4), output_depth);
EXPECT_EQ(kernel.dimension(3), input_depth);
EXPECT_EQ(kernel.dimension(2), patch_planes);
EXPECT_EQ(kernel.dimension(1), patch_rows);
EXPECT_EQ(kernel.dimension(0), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
float expected = 0.0f;
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
                    expected += input(0, k, j, i, id) *
                                output_backward(0, output_k, output_j,
                                                output_i, od);
}
}
}
}
EigenApprox(kernel(c, r, p, id, od), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_cuboid_convolution_backward_kernel_valid) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 5;
const int patch_cols = 5;
const int patch_planes = 3;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
Tensor<float, 5> input(input_depth, input_planes, input_rows, input_cols,
num_batches);
Tensor<float, 5> kernel_backward(output_depth, input_depth, patch_planes,
patch_rows, patch_cols);
Tensor<float, 5> output_backward(output_depth, output_planes, output_rows,
output_cols, num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = CuboidConvolutionBackwardKernel(
input, output_backward, patch_planes, patch_rows, patch_cols, 1, 1, 1);
EXPECT_EQ(kernel_backward.dimension(0), output_depth);
EXPECT_EQ(kernel_backward.dimension(1), input_depth);
EXPECT_EQ(kernel_backward.dimension(2), patch_planes);
EXPECT_EQ(kernel_backward.dimension(3), patch_rows);
EXPECT_EQ(kernel_backward.dimension(4), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int p = 0; p < patch_planes; ++p) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
input(id, i, j, k, b) *
output_backward(od, output_i, output_j, output_k, b);
}
}
}
}
}
EigenApprox(kernel_backward(od, id, p, r, c), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_cuboid_convolution_backward_kernel_valid_row_major) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 5;
const int patch_cols = 5;
const int patch_planes = 3;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel_backward(
patch_cols, patch_rows, patch_planes, input_depth, output_depth);
Tensor<float, 5, RowMajor> output_backward(
num_batches, output_cols, output_rows, output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = CuboidConvolutionBackwardKernel(
input, output_backward, patch_planes, patch_rows, patch_cols, 1, 1, 1);
EXPECT_EQ(kernel_backward.dimension(4), output_depth);
EXPECT_EQ(kernel_backward.dimension(3), input_depth);
EXPECT_EQ(kernel_backward.dimension(2), patch_planes);
EXPECT_EQ(kernel_backward.dimension(1), patch_rows);
EXPECT_EQ(kernel_backward.dimension(0), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int p = 0; p < patch_planes; ++p) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
input(b, k, j, i, id) *
output_backward(b, output_k, output_j, output_i, od);
}
}
}
}
}
EigenApprox(kernel_backward(c, r, p, id, od), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_strided_cuboid_convolution_backward_kernel_valid) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 8;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_planes = 3;
const int patch_rows = 3;
const int patch_cols = 2;
const int stride_planes = 2;
const int stride_cols = 3;
const int stride_rows = 1;
const int output_rows = ceil_div(input_rows - patch_rows + 1, stride_rows);
const int output_cols = ceil_div(input_cols - patch_cols + 1, stride_cols);
const int output_planes =
ceil_div(input_planes - patch_planes + 1, stride_planes);
Tensor<float, 5> input(input_depth, input_planes, input_rows, input_cols,
num_batches);
Tensor<float, 5> kernel_backward(output_depth, input_depth, patch_planes,
patch_rows, patch_cols);
Tensor<float, 5> output_backward(output_depth, output_planes, output_rows,
output_cols, num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = CuboidConvolutionBackwardKernel(
input, output_backward, patch_planes, patch_rows, patch_cols,
stride_planes, stride_rows, stride_cols);
EXPECT_EQ(kernel_backward.dimension(0), output_depth);
EXPECT_EQ(kernel_backward.dimension(1), input_depth);
EXPECT_EQ(kernel_backward.dimension(2), patch_planes);
EXPECT_EQ(kernel_backward.dimension(3), patch_rows);
EXPECT_EQ(kernel_backward.dimension(4), patch_cols);
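  // Strided reference: a kernel tap contributes only when the offset from the
  // input position is a multiple of the stride and the strided output index
  // is in range.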
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int p = 0; p < patch_planes; ++p) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 &&
output_i / stride_planes < output_planes &&
output_j >= 0 && output_j / stride_rows < output_rows &&
output_k >= 0 && output_k / stride_cols < output_cols &&
output_i % stride_planes == 0 &&
output_j % stride_rows == 0 &&
output_k % stride_cols == 0) {
expected += input(id, i, j, k, b) *
output_backward(od, output_i / stride_planes,
output_j / stride_rows,
output_k / stride_cols, b);
}
}
}
}
}
EigenApprox(kernel_backward(od, id, p, r, c), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_strided_cuboid_convolution_backward_kernel_valid_row_major) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 8;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_planes = 3;
const int patch_rows = 3;
const int patch_cols = 2;
const int stride_planes = 2;
const int stride_cols = 3;
const int stride_rows = 1;
const int output_rows = ceil_div(input_rows - patch_rows + 1, stride_rows);
const int output_cols = ceil_div(input_cols - patch_cols + 1, stride_cols);
const int output_planes =
ceil_div(input_planes - patch_planes + 1, stride_planes);
Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel_backward(
patch_cols, patch_rows, patch_planes, input_depth, output_depth);
Tensor<float, 5, RowMajor> output_backward(
num_batches, output_cols, output_rows, output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = CuboidConvolutionBackwardKernel(
input, output_backward, patch_planes, patch_rows, patch_cols,
stride_planes, stride_rows, stride_cols);
EXPECT_EQ(kernel_backward.dimension(4), output_depth);
EXPECT_EQ(kernel_backward.dimension(3), input_depth);
EXPECT_EQ(kernel_backward.dimension(2), patch_planes);
EXPECT_EQ(kernel_backward.dimension(1), patch_rows);
EXPECT_EQ(kernel_backward.dimension(0), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int p = 0; p < patch_planes; ++p) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 &&
output_i / stride_planes < output_planes &&
output_j >= 0 && output_j / stride_rows < output_rows &&
output_k >= 0 && output_k / stride_cols < output_cols &&
output_i % stride_planes == 0 &&
output_j % stride_rows == 0 &&
output_k % stride_cols == 0) {
expected += input(b, k, j, i, id) *
output_backward(b, output_k / stride_cols,
output_j / stride_rows,
output_i / stride_planes, od);
}
}
}
}
}
EigenApprox(kernel_backward(c, r, p, id, od), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_strided_cuboid_convolution_backward_input_valid) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 14;
const int input_rows = 13;
const int input_cols = 15;
const int patch_rows = 3;
const int patch_cols = 2;
const int patch_planes = 4;
const int stride_rows = 3;
const int stride_cols = 2;
const int stride_planes = 3;
const int output_rows = ceil_div(input_rows - patch_rows + 1, stride_rows);
const int output_cols = ceil_div(input_cols - patch_cols + 1, stride_cols);
const int output_planes =
ceil_div(input_planes - patch_planes + 1, stride_planes);
const int output_depth = 5;
Tensor<float, 5> input_backward(input_depth, input_planes, input_rows,
input_cols, num_batches);
Tensor<float, 5> kernel(output_depth, input_depth, patch_planes, patch_rows,
patch_cols);
Tensor<float, 5> output_backward(output_depth, output_planes, output_rows,
output_cols, num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols,
stride_planes, stride_rows, stride_cols);
EXPECT_EQ(input_backward.dimension(4), num_batches);
EXPECT_EQ(input_backward.dimension(3), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(1), input_planes);
EXPECT_EQ(input_backward.dimension(0), input_depth);
for (int b = 0; b < num_batches; ++b) {
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 &&
output_i / stride_planes < output_planes &&
output_j >= 0 && output_j / stride_rows < output_rows &&
output_k >= 0 && output_k / stride_cols < output_cols &&
output_i % stride_planes == 0 &&
output_j % stride_rows == 0 &&
output_k % stride_cols == 0) {
expected += output_backward(od, output_i / stride_planes,
output_j / stride_rows,
output_k / stride_cols, b) *
kernel(od, id, p, r, c);
}
}
}
}
}
EigenApprox(input_backward(id, i, j, k, b), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_strided_cuboid_convolution_backward_input_valid_row_major) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 14;
const int input_rows = 13;
const int input_cols = 15;
const int patch_rows = 3;
const int patch_cols = 2;
const int patch_planes = 4;
const int stride_rows = 3;
const int stride_cols = 2;
const int stride_planes = 3;
const int output_rows = ceil_div(input_rows - patch_rows + 1, stride_rows);
const int output_cols = ceil_div(input_cols - patch_cols + 1, stride_cols);
const int output_planes =
ceil_div(input_planes - patch_planes + 1, stride_planes);
const int output_depth = 5;
Tensor<float, 5, RowMajor> input_backward(num_batches, input_cols, input_rows,
input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel(patch_cols, patch_rows, patch_planes,
input_depth, output_depth);
Tensor<float, 5, RowMajor> output_backward(
num_batches, output_cols, output_rows, output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols,
stride_planes, stride_rows, stride_cols);
EXPECT_EQ(input_backward.dimension(0), num_batches);
EXPECT_EQ(input_backward.dimension(1), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(3), input_planes);
EXPECT_EQ(input_backward.dimension(4), input_depth);
for (int b = 0; b < num_batches; ++b) {
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 &&
output_i / stride_planes < output_planes &&
output_j >= 0 && output_j / stride_rows < output_rows &&
output_k >= 0 && output_k / stride_cols < output_cols &&
output_i % stride_planes == 0 &&
output_j % stride_rows == 0 &&
output_k % stride_cols == 0) {
expected +=
output_backward(b, output_k / stride_cols,
output_j / stride_rows,
output_i / stride_planes, od) *
kernel(c, r, p, id, od);
}
}
}
}
}
EigenApprox(input_backward(b, k, j, i, id), expected);
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_backward_cuboid_convolutions.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_backward_cuboid_convolutions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cdd2bdaf-cc0d-4ceb-ab30-fdf7e6b15650 | cpp | tensorflow/tensorflow | eigen_backward_spatial_convolutions | tensorflow/core/kernels/eigen_backward_spatial_convolutions.h | tensorflow/core/kernels/eigen_backward_spatial_convolutions_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_EIGEN_BACKWARD_SPATIAL_CONVOLUTIONS_H_
#define TENSORFLOW_CORE_KERNELS_EIGEN_BACKWARD_SPATIAL_CONVOLUTIONS_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/tsl/framework/convolution/eigen_spatial_convolutions.h"
namespace Eigen {
typedef IndexList<type2index<0>, type2index<0>, type2index<1>, type2index<1>>
ReverseColMajor;
typedef IndexList<type2index<1>, type2index<1>, type2index<0>, type2index<0>>
ReverseRowMajor;
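// SpatialConvolutionBackwardInput
// Computes the gradient of a 2D convolution with respect to its input, given
// the kernel and the gradient of the output. The result has the spatial
// extents of the forward input (inputRows x inputCols), with channels first
// for ColMajor (last for RowMajor) and any batch dimensions preserved.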
template <typename OutputBackward, typename Kernel>
EIGEN_ALWAYS_INLINE static const std::conditional_t<
internal::traits<OutputBackward>::Layout == ColMajor,
TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
internal::traits<OutputBackward>::NumDimensions>,
const TensorContractionOp<
const array<
IndexPair<typename internal::traits<OutputBackward>::Index>, 1>,
const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
2>,
const Eigen::TensorForcedEvalOp<const TensorShufflingOp<
const array<
typename internal::traits<OutputBackward>::Index, 4>,
const Eigen::TensorForcedEvalOp<const TensorReverseOp<
const ReverseColMajor, const Kernel>>>>>,
const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
2>,
const TensorImagePatchOp<Dynamic, Dynamic,
const OutputBackward>>>>,
TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
internal::traits<OutputBackward>::NumDimensions>,
const TensorContractionOp<
const array<
IndexPair<typename internal::traits<OutputBackward>::Index>, 1>,
const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
2>,
const TensorImagePatchOp<Dynamic, Dynamic,
const OutputBackward>>,
const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
2>,
const Eigen::TensorForcedEvalOp<const TensorShufflingOp<
const array<
typename internal::traits<OutputBackward>::Index, 4>,
const Eigen::TensorForcedEvalOp<const TensorReverseOp<
const ReverseRowMajor, const Kernel>>>>>>>>
SpatialConvolutionBackwardInput(
const Kernel& kernel, const OutputBackward& output_backward,
typename internal::traits<OutputBackward>::Index inputRows,
typename internal::traits<OutputBackward>::Index inputCols,
const DenseIndex row_stride = 1, const DenseIndex col_stride = 1,
const DenseIndex row_in_stride = 1, const DenseIndex col_in_stride = 1) {
typedef typename internal::traits<OutputBackward>::Index TensorIndex;
typedef typename internal::traits<OutputBackward>::Scalar OutScalar;
TensorRef<Tensor<typename internal::traits<Kernel>::Scalar,
internal::traits<Kernel>::NumDimensions,
internal::traits<Kernel>::Layout, TensorIndex>>
kern(kernel);
TensorRef<Tensor<OutScalar, internal::traits<OutputBackward>::NumDimensions,
internal::traits<OutputBackward>::Layout, TensorIndex>>
out(output_backward);
EIGEN_STATIC_ASSERT(internal::traits<Kernel>::Layout ==
internal::traits<OutputBackward>::Layout,
YOU_MADE_A_PROGRAMMING_MISTAKE);
static const bool isColMajor =
(internal::traits<OutputBackward>::Layout == ColMajor);
static const int NumDims = internal::traits<OutputBackward>::NumDimensions;
const TensorIndex kernelFilters =
isColMajor ? kern.dimensions()[0] : kern.dimensions()[3];
const TensorIndex kernelChannels =
isColMajor ? kern.dimensions()[1] : kern.dimensions()[2];
const TensorIndex kernelRows =
isColMajor ? kern.dimensions()[2] : kern.dimensions()[1];
const TensorIndex kernelCols =
isColMajor ? kern.dimensions()[3] : kern.dimensions()[0];
const TensorIndex kernelRowsEff =
kernelRows + (kernelRows - 1) * (row_in_stride - 1);
const TensorIndex kernelColsEff =
kernelCols + (kernelCols - 1) * (col_in_stride - 1);
const TensorIndex outputRows = isColMajor
? output_backward.dimension(1)
: output_backward.dimension(NumDims - 2);
const TensorIndex outputCols = isColMajor
? output_backward.dimension(2)
: output_backward.dimension(NumDims - 3);
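  // Recover the implicit padding of the forward convolution and translate it
  // into the padding applied to the output gradient before patch extraction.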
const TensorIndex forward_pad_top = numext::maxi<Index>(
0, ((outputRows - 1) * row_stride + kernelRowsEff - inputRows) / 2);
const TensorIndex forward_pad_left = numext::maxi<Index>(
0, ((outputCols - 1) * col_stride + kernelColsEff - inputCols) / 2);
const TensorIndex padding_top = kernelRowsEff - 1 - forward_pad_top;
const TensorIndex padding_left = kernelColsEff - 1 - forward_pad_left;
const TensorIndex padding_bottom = inputRows - (outputRows - 1) * row_stride -
2 - padding_top + kernelRowsEff;
const TensorIndex padding_right = inputCols - (outputCols - 1) * col_stride -
2 - padding_left + kernelColsEff;
eigen_assert(padding_top >= 0);
eigen_assert(padding_left >= 0);
eigen_assert(padding_bottom >= 0);
eigen_assert(padding_right >= 0);
typedef std::conditional_t<isColMajor, ReverseColMajor, ReverseRowMajor>
Reverse;
Reverse kernel_reverse;
array<TensorIndex, 4> kernel_shuffle;
if (isColMajor) {
kernel_shuffle[0] = 0;
kernel_shuffle[1] = 2;
kernel_shuffle[2] = 3;
kernel_shuffle[3] = 1;
} else {
kernel_shuffle[0] = 2;
kernel_shuffle[1] = 0;
kernel_shuffle[2] = 1;
kernel_shuffle[3] = 3;
}
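  // Collapse the reversed, shuffled kernel into a 2-D matrix:
  // (filters * rows * cols) by channels for ColMajor, transposed for RowMajor.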
DSizes<TensorIndex, 2> kernel_dims;
if (isColMajor) {
kernel_dims[0] = kernelFilters * kernelRows * kernelCols;
kernel_dims[1] = kernelChannels;
} else {
kernel_dims[1] = kernelFilters * kernelRows * kernelCols;
kernel_dims[0] = kernelChannels;
}
DSizes<TensorIndex, 2> pre_contract_dims;
if (isColMajor) {
pre_contract_dims[0] = kernelFilters * kernelRows * kernelCols;
pre_contract_dims[1] = inputRows * inputCols;
for (int i = 3; i < NumDims; ++i) {
pre_contract_dims[1] *= out.dimension(i);
}
} else {
pre_contract_dims[1] = kernelFilters * kernelRows * kernelCols;
pre_contract_dims[0] = inputRows * inputCols;
for (int i = 0; i < NumDims - 3; ++i) {
pre_contract_dims[0] *= out.dimension(i);
}
}
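  // Flatten the image patches extracted from the output gradient so they can
  // be contracted against the kernel matrix above.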
array<IndexPair<TensorIndex>, 1> contract_dims;
if (isColMajor) {
contract_dims[0] = IndexPair<TensorIndex>(0, 0);
} else {
contract_dims[0] = IndexPair<TensorIndex>(1, 1);
}
DSizes<TensorIndex, NumDims> post_contract_dims;
if (isColMajor) {
post_contract_dims[0] = kernelChannels;
post_contract_dims[1] = inputRows;
post_contract_dims[2] = inputCols;
for (int i = 3; i < NumDims; ++i) {
post_contract_dims[i] = out.dimension(i);
}
} else {
post_contract_dims[NumDims - 1] = kernelChannels;
post_contract_dims[NumDims - 2] = inputRows;
post_contract_dims[NumDims - 3] = inputCols;
for (int i = 0; i < NumDims - 3; ++i) {
post_contract_dims[i] = out.dimension(i);
}
}
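  // Reshape the contraction result back into the input-gradient shape.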
return choose(
Cond<internal::traits<OutputBackward>::Layout == ColMajor>(),
kernel.reverse(kernel_reverse)
.eval()
.shuffle(kernel_shuffle)
.eval()
.reshape(kernel_dims)
.contract(
output_backward
.extract_image_patches(
kernelRows, kernelCols, 1, 1, row_in_stride,
col_in_stride, row_stride, col_stride, padding_top,
padding_bottom, padding_left, padding_right, OutScalar(0))
.reshape(pre_contract_dims),
contract_dims)
.reshape(post_contract_dims),
output_backward
.extract_image_patches(kernelRows, kernelCols, 1, 1, row_in_stride,
col_in_stride, row_stride, col_stride,
padding_top, padding_bottom, padding_left,
padding_right, OutScalar(0))
.reshape(pre_contract_dims)
.contract(kernel.reverse(kernel_reverse)
.eval()
.shuffle(kernel_shuffle)
.eval()
.reshape(kernel_dims),
contract_dims)
.reshape(post_contract_dims));
}
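// SpatialConvolutionBackwardKernel
// Computes the gradient of a 2D convolution with respect to its kernel, given
// the forward input and the gradient of the output. For ColMajor the result
// is laid out as filters x channels x kernelRows x kernelCols, and in the
// reverse order for RowMajor.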
template <typename OutputBackward, typename Input>
EIGEN_ALWAYS_INLINE static const std::conditional_t<
internal::traits<Input>::Layout == ColMajor,
const TensorReverseOp<
const Eigen::array<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Eigen::TensorForcedEvalOp<const Eigen::TensorShufflingOp<
const Eigen::array<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Eigen::TensorReshapingOp<
const Eigen::DSizes<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const TensorContractionOp<
const array<
IndexPair<typename internal::traits<Input>::Index>, 1>,
const TensorReshapingOp<
const DSizes<typename internal::traits<Input>::Index,
2>,
const Eigen::TensorForcedEvalOp<
const Eigen::TensorShufflingOp<
const Eigen::array<
typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Input>>>,
const TensorReshapingOp<
const DSizes<typename internal::traits<Input>::Index,
2>,
const TensorImagePatchOp<
Dynamic, Dynamic,
const Eigen::TensorForcedEvalOp<
const Eigen::TensorShufflingOp<
const Eigen::array<
typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const OutputBackward>>>>>>>>>,
const TensorReverseOp<
const Eigen::array<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Eigen::TensorForcedEvalOp<const Eigen::TensorShufflingOp<
const Eigen::array<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Eigen::TensorReshapingOp<
const Eigen::DSizes<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const TensorContractionOp<
const array<
IndexPair<typename internal::traits<Input>::Index>, 1>,
const TensorReshapingOp<
const DSizes<typename internal::traits<Input>::Index,
2>,
const TensorImagePatchOp<
Dynamic, Dynamic,
const Eigen::TensorForcedEvalOp<
const Eigen::TensorShufflingOp<
const Eigen::array<
typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const OutputBackward>>>>,
const TensorReshapingOp<
const DSizes<typename internal::traits<Input>::Index,
2>,
const Eigen::TensorForcedEvalOp<
const Eigen::TensorShufflingOp<
const Eigen::array<
typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Input>>>>>>>>>
SpatialConvolutionBackwardKernel(
const Input& input, const OutputBackward& output_backward,
typename internal::traits<Input>::Index kernelRows,
typename internal::traits<Input>::Index kernelCols,
const DenseIndex row_stride = 1, const DenseIndex col_stride = 1,
const DenseIndex row_in_stride = 1, const DenseIndex col_in_stride = 1) {
typedef typename internal::traits<Input>::Index TensorIndex;
typedef typename internal::traits<OutputBackward>::Scalar OutScalar;
TensorRef<Tensor<typename internal::traits<Input>::Scalar,
internal::traits<Input>::NumDimensions,
internal::traits<Input>::Layout, TensorIndex>>
in(input);
TensorRef<Tensor<OutScalar, internal::traits<OutputBackward>::NumDimensions,
internal::traits<OutputBackward>::Layout, TensorIndex>>
out(output_backward);
EIGEN_STATIC_ASSERT(internal::traits<Input>::Layout ==
internal::traits<OutputBackward>::Layout,
YOU_MADE_A_PROGRAMMING_MISTAKE);
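  // Per dimension, either an output stride or an input dilation (in_stride)
  // may be greater than one, but not both at once.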
eigen_assert(!(row_stride > 1 && row_in_stride > 1));
eigen_assert(!(col_stride > 1 && col_in_stride > 1));
static const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
static const int NumDims = internal::traits<Input>::NumDimensions;
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions ==
internal::traits<OutputBackward>::NumDimensions,
YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT(NumDims == 4, YOU_MADE_A_PROGRAMMING_MISTAKE);
const TensorIndex inputRows =
isColMajor ? in.dimension(1) : in.dimension(NumDims - 2);
const TensorIndex inputCols =
isColMajor ? in.dimension(2) : in.dimension(NumDims - 3);
const TensorIndex outputRows = isColMajor
? output_backward.dimension(1)
: output_backward.dimension(NumDims - 2);
const TensorIndex outputCols = isColMajor
? output_backward.dimension(2)
: output_backward.dimension(NumDims - 3);
const TensorIndex kernelFilters =
isColMajor ? out.dimensions()[0] : out.dimensions()[NumDims - 1];
const TensorIndex kernelChannels =
isColMajor ? in.dimensions()[0] : in.dimensions()[NumDims - 1];
const TensorIndex kernelRowsEff =
kernelRows + (kernelRows - 1) * (row_in_stride - 1);
const TensorIndex kernelColsEff =
kernelCols + (kernelCols - 1) * (col_in_stride - 1);
TensorIndex batch = 1;
for (int d = 3; d < NumDims; ++d) {
batch *= isColMajor ? in.dimension(d) : in.dimension(NumDims - d - 1);
}
const TensorIndex padRows = numext::maxi<Index>(
0, (outputRows - 1) * row_stride + kernelRowsEff - inputRows);
const TensorIndex padCols = numext::maxi<Index>(
0, (outputCols - 1) * col_stride + kernelColsEff - inputCols);
TensorIndex padding_top = padRows / 2;
TensorIndex padding_left = padCols / 2;
const TensorIndex expanded_out_rows = (outputRows - 1) * row_stride + 1;
const TensorIndex expanded_out_cols = (outputCols - 1) * col_stride + 1;
const TensorIndex padded_out_rows = inputRows + kernelRowsEff - 1;
const TensorIndex padded_out_cols = inputCols + kernelColsEff - 1;
const TensorIndex top_pad_rows = kernelRowsEff - 1 - padding_top;
const TensorIndex left_pad_cols = kernelColsEff - 1 - padding_left;
const TensorIndex bottom_pad_rows =
padded_out_rows - expanded_out_rows - top_pad_rows;
const TensorIndex right_pad_cols =
padded_out_cols - expanded_out_cols - left_pad_cols;
array<TensorIndex, 4> output_backward_shuffle;
if (isColMajor) {
output_backward_shuffle = {3, 1, 2, 0};
} else {
output_backward_shuffle = {3, 1, 2, 0};
}
array<TensorIndex, 4> input_shuffle;
if (isColMajor) {
input_shuffle = {0, 3, 1, 2};
} else {
input_shuffle = {1, 2, 0, 3};
}
DSizes<TensorIndex, 2> input_dims;
if (isColMajor) {
input_dims[0] = kernelChannels;
input_dims[1] = batch * inputRows * inputCols;
} else {
input_dims[1] = kernelChannels;
input_dims[0] = inputCols * inputRows * batch;
}
DSizes<TensorIndex, 2> pre_contract_dims;
if (isColMajor) {
pre_contract_dims[0] = batch * inputRows * inputCols;
pre_contract_dims[1] = kernelRows * kernelCols * kernelFilters;
} else {
pre_contract_dims[1] = inputCols * inputRows * batch;
pre_contract_dims[0] = kernelFilters * kernelCols * kernelRows;
}
array<IndexPair<TensorIndex>, 1> contract_dims;
contract_dims[0] = IndexPair<TensorIndex>(1, 0);
DSizes<TensorIndex, NumDims> post_contract_dims;
if (isColMajor) {
post_contract_dims[0] = kernelChannels;
post_contract_dims[1] = kernelRows;
post_contract_dims[2] = kernelCols;
post_contract_dims[3] = kernelFilters;
} else {
post_contract_dims[0] = kernelFilters;
post_contract_dims[1] = kernelCols;
post_contract_dims[2] = kernelRows;
post_contract_dims[3] = kernelChannels;
}
array<TensorIndex, 4> kernel_shuffle;
if (isColMajor) {
kernel_shuffle = {3, 0, 1, 2};
} else {
kernel_shuffle = {1, 2, 3, 0};
}
array<TensorIndex, 4> kernel_reverse;
if (isColMajor) {
kernel_reverse = {false, false, true, true};
} else {
kernel_reverse = {true, true, false, false};
}
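  // Roles are swapped relative to the forward pass: image patches are
  // extracted from the shuffled output gradient while the flattened, shuffled
  // input acts as the kernel side of the contraction.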
const auto output_backward_shuffled =
output_backward.shuffle(output_backward_shuffle).eval();
const auto input_shuffled =
input.shuffle(input_shuffle).eval().reshape(input_dims);
return choose(
Cond<internal::traits<OutputBackward>::Layout == ColMajor>(),
input_shuffled.contract(
output_backward_shuffled
.extract_image_patches(inputRows, inputCols, row_in_stride,
col_in_stride, 1, 1, row_stride,
col_stride, top_pad_rows,
bottom_pad_rows, left_pad_cols,
right_pad_cols, OutScalar(0))
.reshape(pre_contract_dims),
contract_dims),
output_backward_shuffled
.extract_image_patches(
inputRows, inputCols, row_in_stride, col_in_stride, 1, 1,
row_stride, col_stride, top_pad_rows, bottom_pad_rows,
left_pad_cols, right_pad_cols, OutScalar(0))
.reshape(pre_contract_dims)
.contract(input_shuffled, contract_dims))
.reshape(post_contract_dims)
.shuffle(kernel_shuffle)
.eval()
.reverse(kernel_reverse);
}
}
#endif | #include "tensorflow/core/kernels/eigen_backward_spatial_convolutions.h"
#include "tensorflow/core/platform/test.h"
namespace Eigen {
namespace {
void EigenApprox(float a, float b) {
ASSERT_TRUE(std::abs(a - b) <= std::min(std::abs(a), std::abs(b)) * 1e-3);
}
static int ceil_div(int a, int b) { return (a + b - 1) / b; }
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_spatial_convolution_backward_input_valid) {
const int input_depth = 2;
const int input_rows = 3;
const int input_cols = 4;
const int output_depth = 5;
const int patch_rows = 2;
const int patch_cols = 2;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
Tensor<float, 3> input_backward(input_depth, input_rows, input_cols);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 3> output_backward(output_depth, output_rows, output_cols);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = SpatialConvolutionBackwardInput(kernel, output_backward,
input_rows, input_cols, 1);
EXPECT_EQ(input_backward.dimension(0), input_depth);
EXPECT_EQ(input_backward.dimension(1), input_rows);
EXPECT_EQ(input_backward.dimension(2), input_cols);
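  // Brute-force reference: each input location gathers output_backward *
  // kernel over all kernel taps with an in-range output position.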
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i < output_rows && output_j >= 0 &&
output_j < output_cols) {
expected += output_backward(od, output_i, output_j) *
kernel(od, id, r, c);
}
}
}
}
EigenApprox(input_backward(id, i, j), expected);
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_spatial_convolution_backward_input_valid_row_major) {
const int input_depth = 2;
const int input_rows = 3;
const int input_cols = 4;
const int output_depth = 5;
const int patch_rows = 2;
const int patch_cols = 2;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
Tensor<float, 3, RowMajor> input_backward(input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 3, RowMajor> output_backward(output_cols, output_rows,
output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = SpatialConvolutionBackwardInput(kernel, output_backward,
input_rows, input_cols, 1);
EXPECT_EQ(input_backward.dimension(0), input_cols);
EXPECT_EQ(input_backward.dimension(1), input_rows);
EXPECT_EQ(input_backward.dimension(2), input_depth);
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i < output_rows && output_j >= 0 &&
output_j < output_cols) {
expected += output_backward(output_j, output_i, od) *
kernel(c, r, id, od);
}
}
}
}
EigenApprox(input_backward(j, i, id), expected);
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_spatial_convolution_backward_input_same) {
const int input_depth = 2;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 4;
const int patch_cols = 4;
const int output_rows = input_rows;
const int output_cols = input_cols;
Tensor<float, 3> input_backward(input_depth, input_rows, input_cols);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 3> output_backward(output_depth, output_rows, output_cols);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward = SpatialConvolutionBackwardInput(kernel, output_backward,
input_rows, input_cols, 1);
EXPECT_EQ(input_backward.dimension(0), input_depth);
EXPECT_EQ(input_backward.dimension(1), input_rows);
EXPECT_EQ(input_backward.dimension(2), input_cols);
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - r + (patch_rows - 1) / 2;
int output_j = j - c + (patch_cols - 1) / 2;
if (output_i >= 0 && output_i < output_rows && output_j >= 0 &&
output_j < output_cols) {
expected += output_backward(od, output_i, output_j) *
kernel(od, id, r, c);
}
}
}
}
EigenApprox(input_backward(id, i, j), expected);
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_spatial_convolution_backward_input_same_row_major) {
const int input_depth = 2;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 4;
const int patch_cols = 4;
const int output_rows = input_rows;
const int output_cols = input_cols;
Tensor<float, 3, RowMajor> input_backward(input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 3, RowMajor> output_backward(output_cols, output_rows,
output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward = SpatialConvolutionBackwardInput(kernel, output_backward,
input_rows, input_cols, 1);
EXPECT_EQ(input_backward.dimension(0), input_cols);
EXPECT_EQ(input_backward.dimension(1), input_rows);
EXPECT_EQ(input_backward.dimension(2), input_depth);
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - r + (patch_rows - 1) / 2;
int output_j = j - c + (patch_cols - 1) / 2;
if (output_i >= 0 && output_i < output_rows && output_j >= 0 &&
output_j < output_cols) {
expected += output_backward(output_j, output_i, od) *
kernel(c, r, id, od);
}
}
}
}
EigenApprox(input_backward(j, i, id), expected);
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_spatial_convolution_backward_input_valid) {
const int num_batches = 13;
const int input_depth = 2;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 5;
const int patch_cols = 5;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
Tensor<float, 4> input_backward(input_depth, input_rows, input_cols,
num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> output_backward(output_depth, output_rows, output_cols,
num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = SpatialConvolutionBackwardInput(kernel, output_backward,
input_rows, input_cols, 1);
EXPECT_EQ(input_backward.dimension(0), input_depth);
EXPECT_EQ(input_backward.dimension(1), input_rows);
EXPECT_EQ(input_backward.dimension(2), input_cols);
EXPECT_EQ(input_backward.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i < output_rows && output_j >= 0 &&
output_j < output_cols) {
expected += output_backward(od, output_i, output_j, b) *
kernel(od, id, r, c);
}
}
}
}
EigenApprox(input_backward(id, i, j, b), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_spatial_convolution_backward_input_valid_row_major) {
const int num_batches = 13;
const int input_depth = 2;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 5;
const int patch_cols = 5;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
Tensor<float, 4, RowMajor> input_backward(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 4, RowMajor> output_backward(num_batches, output_cols,
output_rows, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = SpatialConvolutionBackwardInput(kernel, output_backward,
input_rows, input_cols, 1);
EXPECT_EQ(input_backward.dimension(0), num_batches);
EXPECT_EQ(input_backward.dimension(1), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(3), input_depth);
for (int b = 0; b < num_batches; ++b) {
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i < output_rows && output_j >= 0 &&
output_j < output_cols) {
expected += output_backward(b, output_j, output_i, od) *
kernel(c, r, id, od);
}
}
}
}
EigenApprox(input_backward(b, j, i, id), expected);
}
}
}
}
}
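// Strided variant: an input element only receives gradient from output
// positions that are in range and aligned with the stride grid
// (output_i % stride == 0 and output_j % stride == 0).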
static void test_batched_strided_spatial_convolution_backward_input_valid(
const int num_batches, const int input_depth, const int input_rows,
const int input_cols, const int output_depth) {
const int patch_rows = 3;
const int patch_cols = 3;
const int stride = 3;
const int output_rows = divup(input_rows - patch_rows + 1, stride);
const int output_cols = divup(input_cols - patch_cols + 1, stride);
Tensor<float, 4> input_backward(input_depth, input_rows, input_cols,
num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> output_backward(output_depth, output_rows, output_cols,
num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = SpatialConvolutionBackwardInput(
kernel, output_backward, input_rows, input_cols, stride, stride);
EXPECT_EQ(input_backward.dimension(0), input_depth);
EXPECT_EQ(input_backward.dimension(1), input_rows);
EXPECT_EQ(input_backward.dimension(2), input_cols);
EXPECT_EQ(input_backward.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i / stride < output_rows &&
output_j >= 0 && output_j / stride < output_cols &&
output_i % stride == 0 && output_j % stride == 0) {
expected += output_backward(od, output_i / stride,
output_j / stride, b) *
kernel(od, id, r, c);
}
}
}
}
EigenApprox(input_backward(id, i, j, b), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_strided_spatial_convolution_backward_input_valid) {
int num_batches = 1;
int input_depth = 1;
int input_rows = 3;
int input_cols = 5;
int output_depth = 1;
test_batched_strided_spatial_convolution_backward_input_valid(
num_batches, input_depth, input_rows, input_cols, output_depth);
num_batches = 11;
input_depth = 2;
input_rows = 9;
input_cols = 13;
output_depth = 5;
test_batched_strided_spatial_convolution_backward_input_valid(
num_batches, input_depth, input_rows, input_cols, output_depth);
}
static void
test_batched_strided_spatial_convolution_backward_input_valid_row_major(
const int num_batches, const int input_depth, const int input_rows,
const int input_cols, const int output_depth) {
const int patch_rows = 3;
const int patch_cols = 3;
const int stride = 3;
const int output_rows = divup(input_rows - patch_rows + 1, stride);
const int output_cols = divup(input_cols - patch_cols + 1, stride);
Tensor<float, 4, RowMajor> input_backward(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 4, RowMajor> output_backward(num_batches, output_cols,
output_rows, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = SpatialConvolutionBackwardInput(
kernel, output_backward, input_rows, input_cols, stride, stride);
EXPECT_EQ(input_backward.dimension(0), num_batches);
EXPECT_EQ(input_backward.dimension(1), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(3), input_depth);
for (int b = 0; b < num_batches; ++b) {
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i / stride < output_rows &&
output_j >= 0 && output_j / stride < output_cols &&
output_i % stride == 0 && output_j % stride == 0) {
expected += output_backward(b, output_j / stride,
output_i / stride, od) *
kernel(c, r, id, od);
}
}
}
}
EigenApprox(input_backward(b, j, i, id), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_strided_spatial_convolution_backward_input_valid_row_major) {
int num_batches = 1;
int input_depth = 1;
int input_rows = 3;
int input_cols = 5;
int output_depth = 1;
test_batched_strided_spatial_convolution_backward_input_valid_row_major(
num_batches, input_depth, input_rows, input_cols, output_depth);
num_batches = 11;
input_depth = 2;
input_rows = 9;
input_cols = 13;
output_depth = 5;
test_batched_strided_spatial_convolution_backward_input_valid_row_major(
num_batches, input_depth, input_rows, input_cols, output_depth);
}
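// The backward-kernel tests validate SpatialConvolutionBackwardKernel (the
// filter gradient): each kernel coefficient is checked against the sum of
// input * output_backward over the batch and all valid spatial positions.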
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_spatial_convolution_backward_kernel_valid) {
const int num_batches = 5;
const int input_depth = 2;
const int input_rows = 3;
const int input_cols = 4;
const int output_depth = 5;
const int patch_rows = 2;
const int patch_cols = 2;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> output_backward(output_depth, output_rows, output_cols,
num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel.setRandom();
kernel = SpatialConvolutionBackwardKernel(input, output_backward, patch_rows,
patch_cols, 1, 1);
EXPECT_EQ(kernel.dimension(0), output_depth);
EXPECT_EQ(kernel.dimension(1), input_depth);
EXPECT_EQ(kernel.dimension(2), patch_rows);
EXPECT_EQ(kernel.dimension(3), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i < output_rows && output_j >= 0 &&
output_j < output_cols) {
expected += input(id, i, j, b) *
output_backward(od, output_i, output_j, b);
}
}
}
}
EigenApprox(kernel(od, id, r, c), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_spatial_convolution_backward_kernel_valid_row_major) {
const int num_batches = 5;
const int input_depth = 2;
const int input_rows = 3;
const int input_cols = 4;
const int output_depth = 5;
const int patch_rows = 2;
const int patch_cols = 2;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 4, RowMajor> output_backward(num_batches, output_cols,
output_rows, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel.setRandom();
kernel = SpatialConvolutionBackwardKernel(input, output_backward, patch_rows,
patch_cols, 1, 1);
EXPECT_EQ(kernel.dimension(0), patch_cols);
EXPECT_EQ(kernel.dimension(1), patch_rows);
EXPECT_EQ(kernel.dimension(2), input_depth);
EXPECT_EQ(kernel.dimension(3), output_depth);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i < output_rows && output_j >= 0 &&
output_j < output_cols) {
expected += input(b, j, i, id) *
output_backward(b, output_j, output_i, od);
}
}
}
}
EigenApprox(kernel(c, r, id, od), expected);
}
}
}
}
}
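// Atrous (dilated) variants are validated against equivalent dense
// computations: backward-input against an explicitly inflated (zero-stuffed)
// kernel, and backward-kernel against the effective-size kernel gradient
// subsampled with the same strides.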
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_atrous_spatial_convolution_backward_input_valid) {
const int num_batches = 11;
const int patch_rows = 3;
const int patch_cols = 3;
const int input_depth = 2;
const int input_rows = 9;
const int input_cols = 13;
const int in_stride = 3;
const int patch_rows_eff = patch_rows + (patch_rows - 1) * (in_stride - 1);
const int patch_cols_eff = patch_cols + (patch_cols - 1) * (in_stride - 1);
const int output_depth = 5;
const int output_rows = input_rows - patch_rows_eff + 1;
const int output_cols = input_cols - patch_cols_eff + 1;
Tensor<float, 4> output_backward(output_depth, output_rows, output_cols,
num_batches);
output_backward.setRandom();
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
kernel.setRandom();
const array<DenseIndex, 4> kernel_strides({1, 1, in_stride, in_stride});
const Tensor<float, 4> kernel_eff = kernel.inflate(kernel_strides);
const Tensor<float, 4> input_backward =
SpatialConvolutionBackwardInput(kernel, output_backward, input_rows,
input_cols, 1, 1, in_stride, in_stride);
const Tensor<float, 4> expected_input_backward =
SpatialConvolutionBackwardInput(kernel_eff, output_backward, input_rows,
input_cols);
EXPECT_EQ(input_backward.dimension(0), input_depth);
EXPECT_EQ(input_backward.dimension(1), input_rows);
EXPECT_EQ(input_backward.dimension(2), input_cols);
EXPECT_EQ(input_backward.dimension(3), num_batches);
eigen_assert(dimensions_match(input_backward.dimensions(),
expected_input_backward.dimensions()));
for (ptrdiff_t i = 0; i < input_backward.dimensions().TotalSize(); ++i) {
EigenApprox(input_backward.data()[i], expected_input_backward.data()[i]);
}
}
TEST(
EigenBackwardSpatialConvolutionsTest,
test_batched_atrous_spatial_convolution_backward_input_valid_unequal_strides) {
const int num_batches = 11;
const int patch_rows = 3;
const int patch_cols = 3;
const int input_depth = 2;
const int input_rows = 9;
const int input_cols = 13;
const int row_in_stride = 3;
const int col_in_stride = 1;
const int patch_rows_eff =
patch_rows + (patch_rows - 1) * (row_in_stride - 1);
const int patch_cols_eff =
patch_cols + (patch_cols - 1) * (col_in_stride - 1);
const int output_depth = 5;
const int output_rows = input_rows - patch_rows_eff + 1;
const int output_cols = input_cols - patch_cols_eff + 1;
Tensor<float, 4> output_backward(output_depth, output_rows, output_cols,
num_batches);
output_backward.setRandom();
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
kernel.setRandom();
const array<DenseIndex, 4> kernel_strides(
{1, 1, row_in_stride, col_in_stride});
const Tensor<float, 4> kernel_eff = kernel.inflate(kernel_strides);
const Tensor<float, 4> input_backward = SpatialConvolutionBackwardInput(
kernel, output_backward, input_rows, input_cols, 1, 1, row_in_stride,
col_in_stride);
const Tensor<float, 4> expected_input_backward =
SpatialConvolutionBackwardInput(kernel_eff, output_backward, input_rows,
input_cols);
EXPECT_EQ(input_backward.dimension(0), input_depth);
EXPECT_EQ(input_backward.dimension(1), input_rows);
EXPECT_EQ(input_backward.dimension(2), input_cols);
EXPECT_EQ(input_backward.dimension(3), num_batches);
eigen_assert(dimensions_match(input_backward.dimensions(),
expected_input_backward.dimensions()));
for (ptrdiff_t i = 0; i < input_backward.dimensions().TotalSize(); ++i) {
EigenApprox(input_backward.data()[i], expected_input_backward.data()[i]);
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_atrous_spatial_convolution_backward_input_valid_row_major) {
const int num_batches = 11;
const int patch_rows = 3;
const int patch_cols = 3;
const int input_depth = 2;
const int input_rows = 9;
const int input_cols = 13;
const int in_stride = 3;
const int patch_rows_eff = patch_rows + (patch_rows - 1) * (in_stride - 1);
const int patch_cols_eff = patch_cols + (patch_cols - 1) * (in_stride - 1);
const int output_depth = 5;
const int output_rows = input_rows - patch_rows_eff + 1;
const int output_cols = input_cols - patch_cols_eff + 1;
Tensor<float, 4, RowMajor> output_backward(num_batches, output_cols,
output_rows, output_depth);
output_backward.setRandom();
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
kernel.setRandom();
const array<DenseIndex, 4> kernel_strides({in_stride, in_stride, 1, 1});
const Tensor<float, 4, RowMajor> kernel_eff = kernel.inflate(kernel_strides);
const Tensor<float, 4, RowMajor> input_backward =
SpatialConvolutionBackwardInput(kernel, output_backward, input_rows,
input_cols, 1, 1, in_stride, in_stride);
const Tensor<float, 4, RowMajor> expected_input_backward =
SpatialConvolutionBackwardInput(kernel_eff, output_backward, input_rows,
input_cols);
EXPECT_EQ(input_backward.dimension(0), num_batches);
EXPECT_EQ(input_backward.dimension(1), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(3), input_depth);
eigen_assert(dimensions_match(input_backward.dimensions(),
expected_input_backward.dimensions()));
for (ptrdiff_t i = 0; i < input_backward.dimensions().TotalSize(); ++i) {
EigenApprox(input_backward.data()[i], expected_input_backward.data()[i]);
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_atrous_spatial_convolution_backward_kernel_valid) {
const int num_batches = 11;
const int patch_rows = 3;
const int patch_cols = 3;
const int input_depth = 2;
const int input_rows = 9;
const int input_cols = 13;
const int in_stride = 3;
const int patch_rows_eff = patch_rows + (patch_rows - 1) * (in_stride - 1);
const int patch_cols_eff = patch_cols + (patch_cols - 1) * (in_stride - 1);
const int output_depth = 5;
const int output_rows = input_rows - patch_rows_eff + 1;
const int output_cols = input_cols - patch_cols_eff + 1;
Tensor<float, 4> output_backward(output_depth, output_rows, output_cols,
num_batches);
output_backward.setRandom();
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
input.setRandom();
const array<DenseIndex, 4> kernel_strides({1, 1, in_stride, in_stride});
const Tensor<float, 4> kernel_backward =
SpatialConvolutionBackwardKernel(input, output_backward, patch_rows,
patch_cols, 1, 1, in_stride, in_stride);
const Tensor<float, 4> expected_kernel_backward =
SpatialConvolutionBackwardKernel(input, output_backward, patch_rows_eff,
patch_cols_eff)
.stride(kernel_strides);
EXPECT_EQ(kernel_backward.dimension(0), output_depth);
EXPECT_EQ(kernel_backward.dimension(1), input_depth);
EXPECT_EQ(kernel_backward.dimension(2), patch_rows);
EXPECT_EQ(kernel_backward.dimension(3), patch_cols);
eigen_assert(dimensions_match(kernel_backward.dimensions(),
expected_kernel_backward.dimensions()));
for (ptrdiff_t i = 0; i < kernel_backward.dimensions().TotalSize(); ++i) {
EigenApprox(kernel_backward.data()[i], expected_kernel_backward.data()[i]);
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_atrous_spatial_convolution_backward_kernel_valid_row_major) {
const int num_batches = 11;
const int patch_rows = 3;
const int patch_cols = 3;
const int input_depth = 2;
const int input_rows = 9;
const int input_cols = 13;
const int in_stride = 3;
const int patch_rows_eff = patch_rows + (patch_rows - 1) * (in_stride - 1);
const int patch_cols_eff = patch_cols + (patch_cols - 1) * (in_stride - 1);
const int output_depth = 5;
const int output_rows = input_rows - patch_rows_eff + 1;
const int output_cols = input_cols - patch_cols_eff + 1;
Tensor<float, 4, RowMajor> output_backward(num_batches, output_cols,
output_rows, output_depth);
output_backward.setRandom();
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
input.setRandom();
const array<DenseIndex, 4> kernel_strides({in_stride, in_stride, 1, 1});
const Tensor<float, 4, RowMajor> expected_kernel_backward =
SpatialConvolutionBackwardKernel(input, output_backward, patch_rows_eff,
patch_cols_eff)
.stride(kernel_strides);
const Tensor<float, 4, RowMajor> kernel_backward =
SpatialConvolutionBackwardKernel(input, output_backward, patch_rows,
patch_cols, 1, 1, in_stride, in_stride);
EXPECT_EQ(kernel_backward.dimension(0), patch_cols);
EXPECT_EQ(kernel_backward.dimension(1), patch_rows);
EXPECT_EQ(kernel_backward.dimension(2), input_depth);
EXPECT_EQ(kernel_backward.dimension(3), output_depth);
eigen_assert(dimensions_match(kernel_backward.dimensions(),
expected_kernel_backward.dimensions()));
for (ptrdiff_t i = 0; i < kernel_backward.dimensions().TotalSize(); ++i) {
EigenApprox(kernel_backward.data()[i], expected_kernel_backward.data()[i]);
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_spatial_convolution_backward_kernel_valid) {
const int num_batches = 13;
const int input_depth = 2;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 5;
const int patch_cols = 5;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel_backward(output_depth, input_depth, patch_rows,
patch_cols);
Tensor<float, 4> output_backward(output_depth, output_rows, output_cols,
num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = SpatialConvolutionBackwardKernel(
input, output_backward, patch_rows, patch_cols, 1, 1);
EXPECT_EQ(kernel_backward.dimension(0), output_depth);
EXPECT_EQ(kernel_backward.dimension(1), input_depth);
EXPECT_EQ(kernel_backward.dimension(2), patch_rows);
EXPECT_EQ(kernel_backward.dimension(3), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i < output_rows && output_j >= 0 &&
output_j < output_cols) {
expected += input(id, i, j, b) *
output_backward(od, output_i, output_j, b);
}
}
}
}
EigenApprox(kernel_backward(od, id, r, c), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_spatial_convolution_backward_kernel_valid_row_major) {
const int num_batches = 13;
const int input_depth = 2;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 4;
const int patch_cols = 4;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel_backward(patch_cols, patch_rows,
input_depth, output_depth);
Tensor<float, 4, RowMajor> output_backward(num_batches, output_cols,
output_rows, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = SpatialConvolutionBackwardKernel(
input, output_backward, patch_rows, patch_cols, 1, 1);
EXPECT_EQ(kernel_backward.dimension(0), patch_cols);
EXPECT_EQ(kernel_backward.dimension(1), patch_rows);
EXPECT_EQ(kernel_backward.dimension(2), input_depth);
EXPECT_EQ(kernel_backward.dimension(3), output_depth);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i < output_rows && output_j >= 0 &&
output_j < output_cols) {
expected += input(b, j, i, id) *
output_backward(b, output_j, output_i, od);
}
}
}
}
EigenApprox(kernel_backward(c, r, id, od), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_spatial_convolution_backward_kernel_valid_row_major_unequal) {
const int num_batches = 13;
const int input_depth = 2;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 4;
const int patch_cols = 4;
const int r_stride = 2;
const int c_stride = 1;
const int output_rows =
(input_rows - patch_rows + 1 + r_stride - 1) / r_stride;
const int output_cols =
(input_cols - patch_cols + 1 + c_stride - 1) / c_stride;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel_backward(patch_cols, patch_rows,
input_depth, output_depth);
Tensor<float, 4, RowMajor> output_backward(num_batches, output_cols,
output_rows, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = SpatialConvolutionBackwardKernel(
input, output_backward, patch_rows, patch_cols, r_stride, c_stride);
EXPECT_EQ(kernel_backward.dimension(0), patch_cols);
EXPECT_EQ(kernel_backward.dimension(1), patch_rows);
EXPECT_EQ(kernel_backward.dimension(2), input_depth);
EXPECT_EQ(kernel_backward.dimension(3), output_depth);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i / r_stride < output_rows &&
output_i % r_stride == 0 && output_j >= 0 &&
output_j / c_stride < output_cols &&
output_j % c_stride == 0) {
expected += input(b, j, i, id) *
output_backward(b, output_j / c_stride,
output_i / r_stride, od);
}
}
}
}
EigenApprox(kernel_backward(c, r, id, od), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_strided_spatial_convolution_backward_kernel_valid) {
const int num_batches = 13;
const int input_depth = 2;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 5;
const int patch_cols = 5;
const int stride = 2;
const int output_rows = (input_rows - patch_rows + 1 + stride - 1) / stride;
const int output_cols = (input_cols - patch_cols + 1 + stride - 1) / stride;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel_backward(output_depth, input_depth, patch_rows,
patch_cols);
Tensor<float, 4> output_backward(output_depth, output_rows, output_cols,
num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = SpatialConvolutionBackwardKernel(
input, output_backward, patch_rows, patch_cols, stride, stride);
EXPECT_EQ(kernel_backward.dimension(0), output_depth);
EXPECT_EQ(kernel_backward.dimension(1), input_depth);
EXPECT_EQ(kernel_backward.dimension(2), patch_rows);
EXPECT_EQ(kernel_backward.dimension(3), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i / stride < output_rows &&
output_j >= 0 && output_j / stride < output_cols &&
output_i % stride == 0 && output_j % stride == 0) {
expected += input(id, i, j, b) *
output_backward(od, output_i / stride,
output_j / stride, b);
}
}
}
}
EigenApprox(kernel_backward(od, id, r, c), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_strided_spatial_convolution_backward_kernel_valid_row_major) {
const int num_batches = 13;
const int input_depth = 2;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 4;
const int patch_cols = 4;
const int stride = 2;
const int output_rows = (input_rows - patch_rows + 1 + stride - 1) / stride;
const int output_cols = (input_cols - patch_cols + 1 + stride - 1) / stride;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel_backward(patch_cols, patch_rows,
input_depth, output_depth);
Tensor<float, 4, RowMajor> output_backward(num_batches, output_cols,
output_rows, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = SpatialConvolutionBackwardKernel(
input, output_backward, patch_rows, patch_cols, stride, stride);
EXPECT_EQ(kernel_backward.dimension(0), patch_cols);
EXPECT_EQ(kernel_backward.dimension(1), patch_rows);
EXPECT_EQ(kernel_backward.dimension(2), input_depth);
EXPECT_EQ(kernel_backward.dimension(3), output_depth);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_rows; ++i) {
for (int j = 0; j < input_cols; ++j) {
int output_i = i - r;
int output_j = j - c;
if (output_i >= 0 && output_i / stride < output_rows &&
output_j >= 0 && output_j / stride < output_cols &&
output_i % stride == 0 && output_j % stride == 0) {
expected += input(b, j, i, id) *
output_backward(b, output_j / stride,
output_i / stride, od);
}
}
}
}
EigenApprox(kernel_backward(c, r, id, od), expected);
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_backward_spatial_convolutions.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_backward_spatial_convolutions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9daa5f58-6a74-4c1f-a089-e61cfed04987 | cpp | tensorflow/tensorflow | eigen_attention | tensorflow/core/kernels/eigen_attention.h | tensorflow/core/kernels/eigen_attention_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_EIGEN_ATTENTION_H_
#define TENSORFLOW_CORE_KERNELS_EIGEN_ATTENTION_H_
#include "unsupported/Eigen/CXX11/Tensor"
namespace Eigen {
enum ExtractGlimpsesNoiseMode {
UNIFORM = 0,
GAUSSIAN = 1,
ZERO = 2,
};
namespace {
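// Functor consumed by TensorCustomUnaryOp (see ExtractGlimpses below): reports
// the [channels, width, height, batch] output shape and, per batch entry,
// copies one glimpse out of the input, padding out-of-bounds regions with
// noise or zeros as requested.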
template <typename Index>
struct GlimpseExtractionOp {
GlimpseExtractionOp(const Index width, const Index height,
const std::vector<IndexPair<float> >& offsets,
const bool normalized, const bool centered,
const ExtractGlimpsesNoiseMode noise, const int version)
: width_(width),
height_(height),
offsets_(offsets),
normalized_(normalized),
centered_(centered),
noise_(noise),
version_(version) {}
template <typename Input>
DSizes<Index, 4> dimensions(const Input& input) const {
typedef typename internal::traits<Input>::Index IndexType;
typedef TensorRef<Tensor<typename internal::traits<Input>::Scalar, 4,
internal::traits<Input>::Layout, IndexType> >
Ref;
Ref in(input);
DSizes<Index, 4> dims = in.dimensions();
dims[0] = in.dimension(0);
dims[1] = width_;
dims[2] = height_;
dims[3] = in.dimension(3);
return dims;
}
template <typename Input, typename Output, typename Device>
EIGEN_DEVICE_FUNC void eval(const Input& input, Output& output,
const Device& device) const {
typedef typename internal::traits<Input>::Index IndexType;
typedef TensorRef<Tensor<typename internal::traits<Input>::Scalar, 4,
internal::traits<Input>::Layout, IndexType> >
Ref;
Ref in(input);
const Index num_channels = in.dimension(0);
const Index input_width = in.dimension(1);
const Index input_height = in.dimension(2);
const Index batch_size = in.dimension(3);
eigen_assert(input_width > 0);
eigen_assert(input_height > 0);
internal::NormalRandomGenerator<float> gen;
internal::UniformRandomGenerator<float> unigen;
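    // For each batch entry, map the requested (possibly normalized and/or
    // centered) offset to input pixel coordinates; version 1 and version 2
    // differ in how normalization and centering are combined.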
for (Index i = 0; i < batch_size; ++i) {
float x = offsets_[i].first, y = offsets_[i].second;
if (version_ == 1) {
if (normalized_) {
x *= input_width;
y *= input_height;
}
if (centered_) {
x /= 2.0f;
y /= 2.0f;
x += input_width / 2.0f;
y += input_height / 2.0f;
}
x -= width_ / 2.0f;
y -= height_ / 2.0f;
} else {
if (normalized_) {
x *= input_width;
y *= input_height;
if (centered_) {
x /= 2.0f;
y /= 2.0f;
x += input_width / 2.0f;
y += input_height / 2.0f;
x -= width_ / 2.0f;
y -= height_ / 2.0f;
}
} else {
if (centered_) {
x += input_width / 2.0f;
y += input_height / 2.0f;
}
}
}
const Index offset_x = (Index)x;
const Index offset_y = (Index)y;
Index glimpse_width = width_;
Index glimpse_height = height_;
bool partial_overlap = false;
DSizes<Index, 3> slice_offset(0, offset_x, offset_y);
DSizes<Index, 3> slice_extent(num_channels, width_, height_);
DSizes<Index, 3> base_offset(0, 0, 0);
if (offset_x < 0) {
slice_offset[1] = 0;
glimpse_width = (std::max<Index>)(0, width_ + offset_x);
slice_extent[1] = glimpse_width;
base_offset[1] = width_ - glimpse_width;
partial_overlap = true;
} else if (offset_x + width_ >= input_width) {
glimpse_width = (std::max<Index>)(0, input_width - offset_x);
slice_extent[1] = glimpse_width;
partial_overlap = true;
}
if (offset_y < 0) {
slice_offset[2] = 0;
glimpse_height = (std::max<Index>)(0, height_ + offset_y);
slice_extent[2] = glimpse_height;
base_offset[2] = height_ - glimpse_height;
partial_overlap = true;
} else if (offset_y + height_ >= input_height) {
glimpse_height = (std::max<Index>)(0, input_height - offset_y);
slice_extent[2] = glimpse_height;
partial_overlap = true;
}
slice_extent[1] = std::min<Index>(input_width, slice_extent[1]);
slice_extent[2] = std::min<Index>(input_height, slice_extent[2]);
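      // If the glimpse only partially overlaps the input, pre-fill the whole
      // output slice with the selected noise (zeros, uniform between the
      // image's min and max, or Gaussian matched to the per-channel
      // mean/stddev and clamped to the channel's range), then overwrite the
      // overlapping region with real input data.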
if (partial_overlap) {
switch (noise_) {
case ZERO: {
output.template chip<3>(i).device(device) =
output.template chip<3>(i).constant(0);
} break;
case UNIFORM: {
typedef std::remove_const_t<
typename internal::traits<Input>::Scalar>
Scalar;
TensorFixedSize<Scalar, Sizes<> > mini;
mini.device(device) = input.template chip<3>(i).minimum();
TensorFixedSize<float, Sizes<> > range;
range.device(device) = (input.template chip<3>(i).maximum() - mini)
.template cast<float>();
DSizes<Index, 3> glimpse_size(num_channels, width_, height_);
TensorMap<Tensor<float, 3> > tmp(nullptr, glimpse_size);
output.template chip<3>(i).device(device) =
mini.reshape(Sizes<1, 1, 1>()).broadcast(glimpse_size) +
(tmp.random(unigen) *
range.reshape(Sizes<1, 1, 1>()).broadcast(glimpse_size))
.template cast<Scalar>();
} break;
case GAUSSIAN: {
DSizes<Index, 2> glimpse_size(width_, height_);
DSizes<Index, 2> input_size(input_width, input_height);
typedef std::remove_const_t<
typename internal::traits<Input>::Scalar>
Scalar;
for (int j = 0; j < num_channels; ++j) {
TensorFixedSize<Scalar, Sizes<> > mean;
mean.device(device) = input.template chip<3>(i)
.template chip<0>(j)
.template cast<float>()
.mean();
TensorFixedSize<float, Sizes<> > sigma;
sigma.device(device) =
(input.template chip<3>(i)
.template chip<0>(j)
.template cast<float>() -
mean.reshape(Sizes<1, 1>()).broadcast(input_size))
.square()
.mean()
.sqrt();
TensorFixedSize<Scalar, Sizes<> > mini;
mini.device(device) =
input.template chip<3>(i).template chip<0>(j).minimum();
TensorFixedSize<float, Sizes<> > maxi;
maxi.device(device) =
input.template chip<3>(i).template chip<0>(j).maximum();
TensorMap<Tensor<float, 2> > tmp(nullptr, glimpse_size);
output.template chip<3>(i).template chip<0>(j).device(device) =
(mean.reshape(Sizes<1, 1>()).broadcast(glimpse_size) +
(tmp.random(gen) *
sigma.reshape(Sizes<1, 1>()).broadcast(glimpse_size))
.template cast<Scalar>())
.cwiseMin(
maxi.reshape(Sizes<1, 1>()).broadcast(glimpse_size))
.cwiseMax(
mini.reshape(Sizes<1, 1>()).broadcast(glimpse_size));
}
} break;
}
if (glimpse_width == 0 || glimpse_height == 0) {
continue;
}
output.template chip<3>(i)
.slice(base_offset, slice_extent)
.device(device) =
input.template chip<3>(i).slice(slice_offset, slice_extent);
} else {
output.template chip<3>(i).device(device) =
input.template chip<3>(i).slice(slice_offset, slice_extent);
}
}
}
private:
const Index width_;
const Index height_;
const std::vector<IndexPair<float> > offsets_;
const bool normalized_;
const bool centered_;
const ExtractGlimpsesNoiseMode noise_;
const int version_;
};
}  // namespace
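// Extracts a batch of width x height glimpses, one per batch entry, at the
// given offsets from a col-major [channels, rows, cols, batch] input.
// Usage sketch (sizes are illustrative only):
//   Tensor<float, 4> in(3, 32, 48, 10);
//   std::vector<IndexPair<float>> offsets(10);  // one (x, y) pair per image
//   Tensor<float, 4> glimpses(3, 8, 6, 10);
//   glimpses = ExtractGlimpses(in, 8, 6, offsets);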
template <typename Input>
EIGEN_ALWAYS_INLINE static const TensorCustomUnaryOp<
const GlimpseExtractionOp<typename internal::traits<Input>::Index>,
const Input>
ExtractGlimpses(
const Input& input, const typename internal::traits<Input>::Index width,
const typename internal::traits<Input>::Index height,
const std::vector<IndexPair<float> >& offsets, const bool normalized = true,
const bool centered = true,
const ExtractGlimpsesNoiseMode noise = ExtractGlimpsesNoiseMode::UNIFORM,
const int version = 2) {
EIGEN_STATIC_ASSERT(internal::traits<Input>::Layout == ColMajor,
YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 4,
YOU_MADE_A_PROGRAMMING_MISTAKE);
typedef typename internal::traits<Input>::Index Index;
const GlimpseExtractionOp<Index> op(width, height, offsets, normalized,
centered, noise, version);
return input.customOp(op);
}
}  // namespace Eigen
#endif | #include "tensorflow/core/kernels/eigen_attention.h"
#include "tensorflow/core/platform/test.h"
namespace Eigen {
namespace {
void EigenApprox(float a, float b) {
ASSERT_TRUE(std::abs(a - b) <= std::min(std::abs(a), std::abs(b)) * 1e-3);
}
}  // namespace
TEST(EigenAttentionTest, Simple) {
const ptrdiff_t depth = 3;
const ptrdiff_t batch = 10;
const ptrdiff_t rows = 32;
const ptrdiff_t cols = 48;
const ptrdiff_t glimpse_rows = 8;
const ptrdiff_t glimpse_cols = 6;
Tensor<float, 4> input(depth, rows, cols, batch);
input.setRandom();
std::vector<IndexPair<float>> offsets;
offsets.resize(batch);
for (int i = 0; i < batch; ++i) {
offsets[i].first = (-5 + i) / 10.0f;
offsets[i].second = (5 - i) / 10.0f;
}
Tensor<float, 4> result(depth, glimpse_rows, glimpse_cols, batch);
result = ExtractGlimpses(input, glimpse_rows, glimpse_cols, offsets);
for (int b = 0; b < batch; ++b) {
for (int c = 0; c < glimpse_cols; ++c) {
ptrdiff_t source_c =
c + ((1.0f + offsets[b].second) * cols - glimpse_cols) / 2;
for (int r = 0; r < glimpse_rows; ++r) {
ptrdiff_t source_r =
r + ((1.0f + offsets[b].first) * rows - glimpse_rows) / 2;
for (int d = 0; d < depth; ++d) {
EigenApprox(result(d, r, c, b), input(d, source_r, source_c, b));
}
}
}
}
}
TEST(EigenAttentionTest, OutOfBoundsGlimpse) {
const ptrdiff_t depth = 3;
const ptrdiff_t batch = 10;
const ptrdiff_t rows = 32;
const ptrdiff_t cols = 48;
const ptrdiff_t glimpse_rows = 8;
const ptrdiff_t glimpse_cols = 6;
Tensor<float, 4> input(depth, rows, cols, batch);
input.setRandom();
std::vector<IndexPair<float>> offsets;
offsets.resize(batch);
for (int i = 0; i < batch; ++i) {
offsets[i].first = (-5 + i) / 2.0f;
offsets[i].second = (5 - i) / 2.0f;
}
Tensor<float, 4> result(depth, glimpse_rows, glimpse_cols, batch);
result = ExtractGlimpses(input, glimpse_rows, glimpse_cols, offsets);
for (int b = 0; b < batch; ++b) {
for (int c = 0; c < glimpse_cols; ++c) {
ptrdiff_t source_c =
c + ((1.0f + offsets[b].second) * cols - glimpse_cols) / 2;
if (source_c < glimpse_cols / 2 || source_c >= cols - glimpse_cols / 2) {
continue;
}
for (int r = 0; r < glimpse_rows; ++r) {
ptrdiff_t source_r =
r + ((1.0f + offsets[b].first) * rows - glimpse_rows) / 2;
if (source_r < glimpse_rows / 2 ||
source_r >= rows - glimpse_rows / 2) {
continue;
}
for (int d = 0; d < depth; ++d) {
EigenApprox(result(d, r, c, b), input(d, source_r, source_c, b));
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_attention.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_attention_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
65e114f5-b5fd-4148-9e57-cab65cb9be29 | cpp | tensorflow/tensorflow | eigen_activations | tensorflow/core/kernels/eigen_activations.h | tensorflow/core/kernels/eigen_activations_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_EIGEN_ACTIVATIONS_H_
#define TENSORFLOW_CORE_KERNELS_EIGEN_ACTIVATIONS_H_
#include "unsupported/Eigen/CXX11/Tensor"
namespace Eigen {
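// Functor computing the sigmoid derivative directly from the already-computed
// activation y = sigmoid(x): d/dx sigmoid(x) = y * (1 - y).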
template <typename T>
struct scalar_sigmoid_fast_derivative_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& y) const {
const T one = T(1);
return (one - y) * y;
}
template <typename Packet>
inline Packet packetOp(const Packet& y) const {
const Packet one = internal::pset1<Packet>(1);
return internal::pmul(internal::psub(one, y), y);
}
};
namespace internal {
template <typename T>
struct functor_traits<scalar_sigmoid_fast_derivative_op<T> > {
enum {
Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost,
PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasMul &&
packet_traits<T>::HasNegate
};
};
}  // namespace internal
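// Same idea for tanh: given y = tanh(x), d/dx tanh(x) = 1 - y^2.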
template <typename T>
struct scalar_tanh_fast_derivative_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& y) const {
const T one = T(1);
return one - (y * y);
}
template <typename Packet>
inline Packet packetOp(const Packet& y) const {
const Packet one = internal::pset1<Packet>(1);
return internal::psub(one, internal::pmul(y, y));
}
};
namespace internal {
template <typename T>
struct functor_traits<scalar_tanh_fast_derivative_op<T> > {
enum {
Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost * 1,
PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasMul &&
packet_traits<T>::HasNegate
};
};
}  // namespace internal
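// Clamps the first argument to the symmetric range [-b, b], e.g. for gradient
// clipping: input.binaryExpr(input.constant(0.01f), scalar_clip_op<float>()).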
template <typename Scalar>
struct scalar_clip_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar
operator()(const Scalar& a, const Scalar& b) const {
return numext::mini(numext::maxi(a, -b), b);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet
packetOp(const Packet& a, const Packet& b) const {
return internal::pmin(internal::pmax(a, internal::pnegate(b)), b);
}
};
namespace internal {
template <typename Scalar>
struct functor_traits<scalar_clip_op<Scalar> > {
enum {
Cost = NumTraits<Scalar>::AddCost * 3,
PacketAccess = packet_traits<Scalar>::HasMax &&
packet_traits<Scalar>::HasMin &&
packet_traits<Scalar>::HasNegate
};
};
}  // namespace internal
}  // namespace Eigen
#endif | #include "tensorflow/core/kernels/eigen_activations.h"
#include "tensorflow/core/platform/test.h"
namespace Eigen {
namespace {
void EigenApprox(float a, float b) {
ASSERT_TRUE(std::abs(a - b) <= std::min(std::abs(a), std::abs(b)) * 1e-3);
}
}  // namespace
TEST(EigenBackwardSpatialConvolutionsTest, SigmoidFastDerivative) {
const ptrdiff_t depth = 3;
const ptrdiff_t batch = 10;
const ptrdiff_t rows = 32;
const ptrdiff_t cols = 48;
Tensor<float, 4> input(depth, rows, cols, batch);
input.setRandom();
Tensor<float, 4> result(depth, rows, cols, batch);
result = input.unaryExpr(scalar_sigmoid_fast_derivative_op<float>());
for (int b = 0; b < batch; ++b) {
for (int c = 0; c < cols; ++c) {
for (int r = 0; r < rows; ++r) {
for (int d = 0; d < depth; ++d) {
float val = input(d, r, c, b);
EigenApprox(result(d, r, c, b), (1 - val) * val);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest, TanhFastDerivative) {
const ptrdiff_t depth = 3;
const ptrdiff_t batch = 10;
const ptrdiff_t rows = 32;
const ptrdiff_t cols = 48;
Tensor<float, 4> input(depth, rows, cols, batch);
input.setRandom();
Tensor<float, 4> result(depth, rows, cols, batch);
result = input.unaryExpr(scalar_tanh_fast_derivative_op<float>());
for (int b = 0; b < batch; ++b) {
for (int c = 0; c < cols; ++c) {
for (int r = 0; r < rows; ++r) {
for (int d = 0; d < depth; ++d) {
float val = input(d, r, c, b);
EigenApprox(result(d, r, c, b), 1 - (val * val));
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest, Clip) {
const ptrdiff_t depth = 3;
const ptrdiff_t batch = 10;
const ptrdiff_t rows = 32;
const ptrdiff_t cols = 48;
Tensor<float, 4> input(depth, rows, cols, batch);
input.setRandom();
Tensor<float, 4> result(depth, rows, cols, batch);
result = input.binaryExpr(input.constant(0.01), scalar_clip_op<float>());
for (int b = 0; b < batch; ++b) {
for (int c = 0; c < cols; ++c) {
for (int r = 0; r < rows; ++r) {
for (int d = 0; d < depth; ++d) {
float val = input(d, r, c, b);
EigenApprox(result(d, r, c, b),
(std::min)((std::max)(val, -0.01f), 0.01f));
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_activations.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_activations_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
550b570a-3d3c-4266-988c-aca50630b989 | cpp | tensorflow/tensorflow | eigen_pooling | tensorflow/core/kernels/eigen_pooling.h | tensorflow/core/kernels/eigen_pooling_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_EIGEN_POOLING_H_
#define TENSORFLOW_CORE_KERNELS_EIGEN_POOLING_H_
#include "unsupported/Eigen/CXX11/Tensor"
namespace Eigen {
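// Spatial max pooling over a 4D tensor (col-major [channels, rows, cols,
// batch] or the row-major reverse): extracts patchRows x patchCols image
// patches with the given strides and optional input dilation
// (in_strideRows/in_strideCols), reduces each patch with a max, and reshapes
// back to the pooled spatial dimensions. Padding cells are filled with the
// type's lowest value so they never win the reduction.
// Usage sketch (shapes are illustrative only):
//   Tensor<float, 4> in(3, 32, 32, 8);
//   Tensor<float, 4> out(3, 16, 16, 8);
//   out = SpatialMaxPooling(in, 2, 2, 2, 2, PADDING_VALID);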
template <typename Input>
EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
const Eigen::DSizes<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const TensorReductionOp<
internal::MaxReducer<
std::remove_const_t<typename internal::traits<Input>::Scalar>>,
std::conditional_t<
internal::traits<Input>::Layout == ColMajor,
const Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>>,
const Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>>>,
const TensorImagePatchOp<Dynamic, Dynamic, const Input>>>
SpatialMaxPooling(const Input& input, DenseIndex patchRows,
DenseIndex patchCols, DenseIndex strideRows,
DenseIndex strideCols, const PaddingType padding_type,
DenseIndex in_strideRows = 1, DenseIndex in_strideCols = 1) {
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 4,
YOU_MADE_A_PROGRAMMING_MISTAKE);
typedef typename internal::traits<Input>::Index TensorIndex;
TensorRef<Tensor<typename internal::traits<Input>::Scalar,
internal::traits<Input>::NumDimensions,
internal::traits<Input>::Layout, TensorIndex> >
in(input);
const DenseIndex patchRowsEff =
patchRows + (patchRows - 1) * (in_strideRows - 1);
const DenseIndex patchColsEff =
patchCols + (patchCols - 1) * (in_strideCols - 1);
static const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
static const int idxRows = isColMajor ? 1 : 2;
static const int idxCols = isColMajor ? 2 : 1;
Eigen::DSizes<TensorIndex, internal::traits<Input>::NumDimensions>
post_reduce_dims;
post_reduce_dims[0] = in.dimension(0);
if (padding_type == PADDING_VALID) {
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)) - patchRowsEff + 1,
strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)) - patchColsEff + 1,
strideCols);
} else {
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)), strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)), strideCols);
}
post_reduce_dims[3] = in.dimension(3);
std::conditional_t<
internal::traits<Input>::Layout == ColMajor,
const Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>>,
const Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>>>
reduction_dims;
return input
.extract_image_patches(
patchRows, patchCols, strideRows, strideCols, in_strideRows,
in_strideCols, padding_type,
Eigen::NumTraits<std::remove_const_t<
typename internal::traits<Input>::Scalar>>::lowest())
.maximum(reduction_dims)
.reshape(post_reduce_dims);
}
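// 3D (cuboid) max pooling over a 5D tensor: volume patches are extracted,
// flattened so each patch lies along one dimension, reduced with a max, and
// reshaped to [channels, out_planes, out_rows, out_cols, batch] (col-major).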
template <typename Input>
EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
const Eigen::DSizes<DenseIndex, internal::traits<Input>::NumDimensions>,
const TensorReductionOp<
internal::MaxReducer<
std::remove_const_t<typename internal::traits<Input>::Scalar>>,
const Eigen::IndexList<Eigen::type2index<1>>,
const TensorReshapingOp<
const Eigen::DSizes<DenseIndex, 3>,
const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic, const Input>>>>
CuboidMaxPooling(const Input& input, DenseIndex patchPlanes,
DenseIndex patchRows, DenseIndex patchCols,
DenseIndex stridePlanes, DenseIndex strideRows,
DenseIndex strideCols, const PaddingType padding_type) {
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 5,
YOU_MADE_A_PROGRAMMING_MISTAKE);
static const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
typedef typename internal::traits<Input>::Index TensorIndex;
TensorRef<Tensor<typename internal::traits<Input>::Scalar,
internal::traits<Input>::NumDimensions,
internal::traits<Input>::Layout, TensorIndex> >
in(input);
static const int idxPlanes = isColMajor ? 1 : 3;
static const int idxRows = 2;
static const int idxCols = isColMajor ? 3 : 1;
Eigen::DSizes<DenseIndex, internal::traits<Input>::NumDimensions>
post_reduce_dims;
post_reduce_dims[0] = in.dimension(0);
if (padding_type == PADDING_VALID) {
post_reduce_dims[idxPlanes] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxPlanes)) - patchPlanes + 1,
stridePlanes);
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)) - patchRows + 1,
strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)) - patchCols + 1,
strideCols);
} else {
post_reduce_dims[idxPlanes] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxPlanes)), stridePlanes);
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)), strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)), strideCols);
}
post_reduce_dims[4] = in.dimension(4);
Eigen::DSizes<DenseIndex, 3> pre_reduce_dims;
pre_reduce_dims[1] = patchRows * patchCols * patchPlanes;
if (isColMajor) {
pre_reduce_dims[0] = post_reduce_dims[0];
pre_reduce_dims[2] = post_reduce_dims[1] * post_reduce_dims[2] *
post_reduce_dims[3] * post_reduce_dims[4];
} else {
pre_reduce_dims[0] = post_reduce_dims[0] * post_reduce_dims[1] *
post_reduce_dims[2] * post_reduce_dims[3];
pre_reduce_dims[2] = post_reduce_dims[4];
}
typedef std::remove_const_t<typename internal::traits<Input>::Scalar>
CoeffReturnType;
Eigen::IndexList<Eigen::type2index<1> > reduction_dims;
return input
.extract_volume_patches(patchPlanes, patchRows, patchCols, stridePlanes,
strideRows, strideCols, padding_type,
-Eigen::NumTraits<CoeffReturnType>::highest())
.reshape(pre_reduce_dims)
.maximum(reduction_dims)
.reshape(post_reduce_dims);
}
namespace internal {
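// Stateful reducer used for average pooling: padded cells are marked with
// -highest() by the patch extraction and are excluded from both the running
// sum and the element count, so the mean is taken over valid entries only.
// A vectorized path is enabled for float on x86 hosts.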
template <typename T>
struct AvgPoolMeanReducer {
#if (EIGEN_ARCH_i386 || EIGEN_ARCH_x86_64) && !defined(__CUDACC__) && \
!defined(__HIPCC__)
static constexpr bool PacketAccess = internal::is_same<T, float>::value;
#else
static const bool PacketAccess = false;
#endif
static constexpr bool IsStateful = true;
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE AvgPoolMeanReducer() : scalarCount_(0) {
typedef typename packet_traits<T>::type Packet;
#if defined(__HIPCC__)
packetCount_ = 0;
#else
packetCount_ = pset1<Packet>(T(0.0));
#endif
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) {
if (t != -Eigen::NumTraits<T>::highest()) {
(*accum) = (*accum) + t;
scalarCount_++;
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
return static_cast<T>(0);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
eigen_assert(scalarCount_ > 0);
return accum / T(scalarCount_);
}
#if (EIGEN_ARCH_i386 || EIGEN_ARCH_x86_64) && !defined(__CUDACC__) && \
!defined(__HIPCC__)
#ifdef EIGEN_VECTORIZE_AVX512
#define pequal(a, b) \
_mm512_castsi512_ps( \
_mm512_maskz_set1_epi32(_mm512_cmp_ps_mask(a, b, _CMP_EQ_UQ), -1))
#define psel(a, b, false_mask) \
_mm512_castsi512_ps(_mm512_ternarylogic_epi32( \
_mm512_castps_si512(a), _mm512_castps_si512(b), \
_mm512_castps_si512(false_mask), 0xd8))
#elif defined EIGEN_VECTORIZE_AVX
#define pequal(a, b) _mm256_cmp_ps(a, b, _CMP_EQ_UQ)
#define psel(a, b, false_mask) _mm256_blendv_ps(a, b, false_mask)
#else
#define pequal(a, b) _mm_cmpeq_ps(a, b)
#define psel(a, b, false_mask) \
_mm_or_ps(_mm_andnot_ps(false_mask, a), _mm_and_ps(false_mask, b))
#endif
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p,
Packet* accum) {
reducePacketWithType(static_cast<T>(0), p, accum);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacketWithType(
T, const Packet& p, Packet* accum) {
Packet skip_mask =
pequal(p, pset1<Packet>(-Eigen::NumTraits<T>::highest()));
(*accum) = padd<Packet>(*accum, psel(p, pset1<Packet>(0), skip_mask));
packetCount_ = padd<Packet>(
packetCount_, psel(pset1<Packet>(1), pset1<Packet>(0), skip_mask));
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return pset1<Packet>(0);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet
finalizePacket(const Packet& vaccum) const {
return pdiv(vaccum, packetCount_);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T
finalizeBoth(const T saccum, const Packet& vaccum) const {
return (saccum + predux(vaccum)) / (scalarCount_ + predux(packetCount_));
}
#endif
protected:
typedef typename packet_traits<T>::type Packet;
int scalarCount_;
#if defined(__HIPCC__)
int packetCount_;
#else
Packet packetCount_;
#endif
};
template <typename Device>
struct reducer_traits<AvgPoolMeanReducer<float>, Device> {
enum {
Cost = 1,
#if (EIGEN_ARCH_i386 || EIGEN_ARCH_x86_64) && !defined(__CUDACC__) && \
!defined(__HIPCC__)
PacketAccess = true,
#else
PacketAccess = false,
#endif
IsStateful = true,
IsExactlyAssociative = false
};
};
template <>
struct reducer_traits<AvgPoolMeanReducer<float>, GpuDevice> {
enum {
Cost = 1,
PacketAccess = false,
IsStateful = true,
IsExactlyAssociative = false
};
};
}  // namespace internal
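// Spatial average pooling: same patch extraction as SpatialMaxPooling above,
// but reduced with AvgPoolMeanReducer so padding does not bias the average.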
template <typename Input>
EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
const Eigen::DSizes<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const TensorReductionOp<
internal::AvgPoolMeanReducer<
std::remove_const_t<typename internal::traits<Input>::Scalar>>,
std::conditional_t<
internal::traits<Input>::Layout == ColMajor,
const Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>>,
const Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>>>,
const TensorImagePatchOp<Dynamic, Dynamic, const Input>>>
SpatialAvgPooling(const Input& input, DenseIndex patchRows,
DenseIndex patchCols, DenseIndex strideRows,
DenseIndex strideCols, const PaddingType padding_type,
DenseIndex in_strideRows = 1, DenseIndex in_strideCols = 1) {
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 4,
YOU_MADE_A_PROGRAMMING_MISTAKE);
typedef typename internal::traits<Input>::Index TensorIndex;
TensorRef<Tensor<typename internal::traits<Input>::Scalar,
internal::traits<Input>::NumDimensions,
internal::traits<Input>::Layout, TensorIndex> >
in(input);
const DenseIndex patchRowsEff =
patchRows + (patchRows - 1) * (in_strideRows - 1);
const DenseIndex patchColsEff =
patchCols + (patchCols - 1) * (in_strideCols - 1);
static const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
static const int idxRows = isColMajor ? 1 : 2;
static const int idxCols = isColMajor ? 2 : 1;
Eigen::DSizes<TensorIndex, internal::traits<Input>::NumDimensions>
post_reduce_dims;
post_reduce_dims[0] = in.dimension(0);
if (padding_type == PADDING_VALID) {
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)) - patchRowsEff + 1,
strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)) - patchColsEff + 1,
strideCols);
} else {
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)), strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)), strideCols);
}
post_reduce_dims[3] = in.dimension(3);
typedef std::remove_const_t<typename internal::traits<Input>::Scalar>
CoeffReturnType;
internal::AvgPoolMeanReducer<CoeffReturnType> mean_with_nan;
std::conditional_t<
internal::traits<Input>::Layout == ColMajor,
const Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>>,
const Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>>>
reduction_dims;
return input
.extract_image_patches(patchRows, patchCols, strideRows, strideCols,
in_strideRows, in_strideCols, padding_type,
-Eigen::NumTraits<CoeffReturnType>::highest())
.reduce(reduction_dims, mean_with_nan)
.reshape(post_reduce_dims);
}
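// 3D average pooling (planes, rows, cols) over a rank-5 input. The volume
// patches are reshaped to rank 3 and reduced along dimension 1; padded
// entries again carry the -highest sentinel and are ignored by the reducer.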
template <typename Input>
EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
const Eigen::DSizes<DenseIndex, internal::traits<Input>::NumDimensions>,
const TensorReductionOp<
internal::AvgPoolMeanReducer<
std::remove_const_t<typename internal::traits<Input>::Scalar>>,
const Eigen::IndexList<Eigen::type2index<1>>,
const TensorReshapingOp<
const Eigen::DSizes<DenseIndex, 3>,
const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic, const Input>>>>
CuboidAvgPooling(const Input& input, DenseIndex patchPlanes,
DenseIndex patchRows, DenseIndex patchCols,
DenseIndex stridePlanes, DenseIndex strideRows,
DenseIndex strideCols, const PaddingType padding_type) {
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 5,
YOU_MADE_A_PROGRAMMING_MISTAKE);
static const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
typedef typename internal::traits<Input>::Index TensorIndex;
TensorRef<Tensor<typename internal::traits<Input>::Scalar,
internal::traits<Input>::NumDimensions,
internal::traits<Input>::Layout, TensorIndex> >
in(input);
static const int idxPlanes = isColMajor ? 1 : 3;
static const int idxRows = 2;
static const int idxCols = isColMajor ? 3 : 1;
Eigen::DSizes<DenseIndex, internal::traits<Input>::NumDimensions>
post_reduce_dims;
post_reduce_dims[0] = in.dimension(0);
if (padding_type == PADDING_VALID) {
post_reduce_dims[idxPlanes] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxPlanes)) - patchPlanes + 1,
stridePlanes);
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)) - patchRows + 1,
strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)) - patchCols + 1,
strideCols);
} else {
post_reduce_dims[idxPlanes] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxPlanes)), stridePlanes);
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)), strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)), strideCols);
}
post_reduce_dims[4] = in.dimension(4);
Eigen::DSizes<DenseIndex, 3> pre_reduce_dims;
pre_reduce_dims[1] = patchRows * patchCols * patchPlanes;
if (isColMajor) {
pre_reduce_dims[0] = post_reduce_dims[0];
pre_reduce_dims[2] = post_reduce_dims[1] * post_reduce_dims[2] *
post_reduce_dims[3] * post_reduce_dims[4];
} else {
pre_reduce_dims[0] = post_reduce_dims[0] * post_reduce_dims[1] *
post_reduce_dims[2] * post_reduce_dims[3];
pre_reduce_dims[2] = post_reduce_dims[4];
}
typedef std::remove_const_t<typename internal::traits<Input>::Scalar>
CoeffReturnType;
internal::AvgPoolMeanReducer<CoeffReturnType> mean_with_nan;
Eigen::IndexList<Eigen::type2index<1> > reduction_dims;
return input
.extract_volume_patches(patchPlanes, patchRows, patchCols, stridePlanes,
strideRows, strideCols, padding_type,
-Eigen::NumTraits<CoeffReturnType>::highest())
.reshape(pre_reduce_dims)
.reduce(reduction_dims, mean_with_nan)
.reshape(post_reduce_dims);
}
}
#endif | #include "tensorflow/core/kernels/eigen_pooling.h"
#include "tensorflow/core/platform/test.h"
namespace Eigen {
namespace {
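// Asserts that two floats agree to within a relative tolerance of 1e-3.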
void EigenApprox(float a, float b) {
ASSERT_TRUE(std::abs(a - b) <= std::min(std::abs(a), std::abs(b)) * 1e-3);
}
}
TEST(EigenPoolingTest, Simple) {
const int depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 4;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4> input(depth, input_rows, input_cols, num_batches);
Tensor<float, 4> result(depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.f);
const int stride = 1;
result = SpatialMaxPooling(input, patch_rows, patch_cols, stride, stride,
PADDING_VALID);
EXPECT_EQ(result.dimension(0), depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < depth; ++d) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = -10000.f;
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected = (std::max)(expected, input(d, r + i, c + j, b));
}
}
if (result(d, i, j, b) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i << " j=" << j
<< " " << result(d, i, j, b) << " vs " << expected
<< std::endl;
}
EigenApprox(result(d, i, j, b), expected);
}
}
}
}
}
TEST(EigenPoolingTest, SimpleRowMajor) {
const int depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 4;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows, depth);
Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
depth);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.f);
const int stride = 1;
result = SpatialMaxPooling(input, patch_rows, patch_cols, stride, stride,
PADDING_VALID);
EXPECT_EQ(result.dimension(3), depth);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(0), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < depth; ++d) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = -10000.f;
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected = (std::max)(expected, input(b, c + j, r + i, d));
}
}
if (result(b, j, i, d) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i << " j=" << j
<< " " << result(b, j, i, d) << " vs " << expected
<< std::endl;
}
EigenApprox(result(b, j, i, d), expected);
}
}
}
}
}
TEST(EigenPoolingTest, Cuboid) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 3;
const int patch_planes = 2;
const int output_rows = 2;
const int output_cols = 3;
const int output_planes = 4;
Tensor<float, 5> input(channels, input_planes, input_rows, input_cols,
num_batches);
Tensor<float, 5> result(channels, output_planes, output_rows, output_cols,
num_batches);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.0f);
const int stride = 1;
result = CuboidMaxPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), channels);
EXPECT_EQ(result.dimension(1), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_cols);
EXPECT_EQ(result.dimension(4), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected = -10000.f;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected =
(std::max)(expected, input(d, p + i, r + j, c + k, b));
}
}
}
if (result(d, i, j, k, b) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " k=" << k << " "
<< result(d, i, j, k, b) << " vs " << expected
<< std::endl;
}
EigenApprox(result(d, i, j, k, b), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, CuboidRowMajor) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 3;
const int patch_planes = 2;
const int output_rows = 2;
const int output_cols = 3;
const int output_planes = 4;
Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
input_planes, channels);
Tensor<float, 5, RowMajor> result(num_batches, output_cols, output_rows,
output_planes, channels);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.0f);
const int stride = 1;
result = CuboidMaxPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(4), channels);
EXPECT_EQ(result.dimension(3), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(0), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected = -10000.f;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected =
(std::max)(expected, input(b, c + k, r + j, p + i, d));
}
}
}
if (result(b, k, j, i, d) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " k=" << k << " "
<< result(b, k, j, i, d) << " vs " << expected
<< std::endl;
}
EigenApprox(result(b, k, j, i, d), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, ValidCuboid) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 3;
const int patch_planes = 2;
const int output_rows = 2;
const int output_cols = 3;
const int output_planes = 4;
Tensor<float, 5> input(channels, input_planes, input_rows, input_cols,
num_batches);
Tensor<float, 5> result(channels, output_planes, output_rows, output_cols,
num_batches);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.0f);
const int stride = 1;
result = CuboidAvgPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), channels);
EXPECT_EQ(result.dimension(1), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_cols);
EXPECT_EQ(result.dimension(4), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected_sum = 0.0f;
int expected_count = 0;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected_sum += input(d, p + i, r + j, c + k, b);
expected_count++;
}
}
}
const float expected = expected_sum / expected_count;
if (result(d, i, j, k, b) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " k=" << k << " "
<< result(d, i, j, k, b) << " vs " << expected
<< std::endl;
}
EigenApprox(result(d, i, j, k, b), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, ValidCuboidRowMajor) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 3;
const int patch_planes = 2;
const int output_rows = 2;
const int output_cols = 3;
const int output_planes = 4;
Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
input_planes, channels);
Tensor<float, 5, RowMajor> result(num_batches, output_cols, output_rows,
output_planes, channels);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.0f);
const int stride = 1;
result = CuboidAvgPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(4), channels);
EXPECT_EQ(result.dimension(3), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(0), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected_sum = 0.0f;
int expected_count = 0;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected_sum += input(b, c + k, r + j, p + i, d);
expected_count++;
}
}
}
const float expected = expected_sum / expected_count;
if (result(b, k, j, i, d) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " k=" << k << " "
<< result(b, k, j, i, d) << " vs " << expected
<< std::endl;
}
EigenApprox(result(b, k, j, i, d), expected);
}
}
}
}
}
}
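// For SAME padding the reference value averages only the in-bounds inputs of
// each patch; dp/dr/dc below are the leading (front/top/left) padding offsets
// implied by the patch sizes.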
TEST(EigenPoolingTest, SameCuboid) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 3;
const int patch_planes = 2;
const int output_rows = input_rows;
const int output_cols = input_cols;
const int output_planes = input_planes;
Tensor<float, 5> input(channels, input_planes, input_rows, input_cols,
num_batches);
Tensor<float, 5> result(channels, output_planes, output_rows, output_cols,
num_batches);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.0f);
const int stride = 1;
result = CuboidAvgPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_SAME);
EXPECT_EQ(result.dimension(0), channels);
EXPECT_EQ(result.dimension(1), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_cols);
EXPECT_EQ(result.dimension(4), num_batches);
const int pad_p = output_planes - input_planes + patch_planes - 1;
const int pad_r = output_rows - input_rows + patch_rows - 1;
const int pad_c = output_cols - input_cols + patch_cols - 1;
const int dp = pad_p / 2;
const int dr = pad_r / 2;
const int dc = pad_c / 2;
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected_sum = 0.0f;
int expected_count = 0;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
const int in_p = p + i - dp;
const int in_r = r + j - dr;
const int in_c = c + k - dc;
if (in_p >= 0 && in_p < input_planes && in_r >= 0 &&
in_r < input_rows && in_c >= 0 && in_c < input_cols) {
expected_sum += input(d, in_p, in_r, in_c, b);
expected_count++;
}
}
}
}
const float expected = expected_sum / expected_count;
if (result(d, i, j, k, b) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " k=" << k << " "
<< result(d, i, j, k, b) << " vs " << expected
<< std::endl;
}
EigenApprox(result(d, i, j, k, b), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, SameCuboidRowMajor) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 3;
const int patch_planes = 2;
const int output_rows = input_rows;
const int output_cols = input_cols;
const int output_planes = input_planes;
Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
input_planes, channels);
Tensor<float, 5, RowMajor> result(num_batches, output_cols, output_rows,
output_planes, channels);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.0f);
const int stride = 1;
result = CuboidAvgPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_SAME);
EXPECT_EQ(result.dimension(4), channels);
EXPECT_EQ(result.dimension(3), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(0), num_batches);
const int pad_p = output_planes - input_planes + patch_planes - 1;
const int pad_r = output_rows - input_rows + patch_rows - 1;
const int pad_c = output_cols - input_cols + patch_cols - 1;
const int dp = pad_p / 2;
const int dr = pad_r / 2;
const int dc = pad_c / 2;
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected_sum = 0.0f;
int expected_count = 0;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
const int in_p = p + i - dp;
const int in_r = r + j - dr;
const int in_c = c + k - dc;
if (in_p >= 0 && in_p < input_planes && in_r >= 0 &&
in_r < input_rows && in_c >= 0 && in_c < input_cols) {
expected_sum += input(b, in_c, in_r, in_p, d);
expected_count++;
}
}
}
}
const float expected = expected_sum / expected_count;
if (result(b, k, j, i, d) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " k=" << k << " "
<< result(b, k, j, i, d) << " vs " << expected
<< std::endl;
}
EigenApprox(result(b, k, j, i, d), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, Strided) {
const int depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4> input(depth, input_rows, input_cols, num_batches);
Tensor<float, 4> result(depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
result.setRandom();
int stride = 2;
result = SpatialMaxPooling(input, patch_rows, patch_cols, stride, stride,
PADDING_VALID);
EXPECT_EQ(result.dimension(0), depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < depth; ++d) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = -10000.f;
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected = (std::max)(
expected, input(d, r + stride * i, c + stride * j, b));
}
}
if (result(d, i, j, b) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i << " j=" << j
<< " " << result(d, i, j, b) << " vs " << expected
<< std::endl;
}
EigenApprox(result(d, i, j, b), expected);
}
}
}
}
}
TEST(EigenPoolingTest, StridedRowMajor) {
const int depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows, depth);
Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
depth);
input = input.constant(11.0f) + input.random();
result.setRandom();
int stride = 2;
result = SpatialMaxPooling(input, patch_rows, patch_cols, stride, stride,
PADDING_VALID);
EXPECT_EQ(result.dimension(3), depth);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(0), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < depth; ++d) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = -10000.f;
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected = (std::max)(
expected, input(b, c + stride * j, r + stride * i, d));
}
}
if (result(b, j, i, d) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i << " j=" << j
<< " " << result(b, j, i, d) << " vs " << expected
<< std::endl;
}
EigenApprox(result(b, j, i, d), expected);
}
}
}
}
}
TEST(EigenPoolingTest, StridedCuboid) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_planes = 3;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_planes = 2;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 5> input(channels, input_planes, input_rows, input_cols,
num_batches);
Tensor<float, 5> result(channels, output_planes, output_rows, output_cols,
num_batches);
input = input.constant(11.0f) + input.random();
result.setRandom();
int stride = 2;
result = CuboidMaxPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), channels);
EXPECT_EQ(result.dimension(1), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_cols);
EXPECT_EQ(result.dimension(4), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected = -10000.f;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected = (std::max)(expected,
input(d, p + stride * i, r + stride * j,
c + stride * k, b));
}
}
}
if (result(d, i, j, k, b) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " " << k << " "
<< result(d, i, j, k, b) << " vs " << expected
<< std::endl;
}
EigenApprox(result(d, i, j, k, b), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, StridedCuboidRowMajor) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_planes = 3;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_planes = 2;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
input_planes, channels);
Tensor<float, 5, RowMajor> result(num_batches, output_cols, output_rows,
output_planes, channels);
input = input.constant(11.0f) + input.random();
result.setRandom();
int stride = 2;
result = CuboidMaxPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(4), channels);
EXPECT_EQ(result.dimension(3), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(0), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected = -10000.f;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected = (std::max)(expected,
input(b, c + stride * k, r + stride * j,
p + stride * i, d));
}
}
}
if (result(b, k, j, i, d) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " " << k << " "
<< result(b, k, j, i, d) << " vs " << expected
<< std::endl;
}
EigenApprox(result(b, k, j, i, d), expected);
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_pooling.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_pooling_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
68d21ad5-0373-4de2-8fba-27046bfbf55d | cpp | tensorflow/tensorflow | conv_2d | tensorflow/core/kernels/conv_2d.h | tensorflow/lite/delegates/xnnpack/conv_2d_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_CONV_2D_H_
#define TENSORFLOW_CORE_KERNELS_CONV_2D_H_
#include "absl/strings/string_view.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/tsl/framework/convolution/eigen_spatial_convolutions.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/kernels/eigen_backward_spatial_convolutions.h"
#include "tensorflow/core/util/tensor_format.h"
static bool Conv2dUseFp16Accumulate() {
static bool use_fp16_accumulate = []() {
const char* env = std::getenv("TF_CONV2D_USE_FP16_ACCUMULATE");
return (env != nullptr) && (absl::string_view(env) == "1");
}();
return use_fp16_accumulate;
}
namespace tensorflow {
namespace functor {
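// Thin wrapper around Eigen::SpatialConvolution; note that the column
// arguments (stride/dilation/padding) are passed ahead of the row arguments
// to match Eigen's dimension ordering.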
template <typename Device, typename Input, typename Filter, typename Output,
typename OutputKernel>
void SpatialConvolutionFunc(const Device& d, Output output, Input input,
Filter filter, int row_stride, int col_stride,
int row_dilation, int col_dilation,
const Eigen::PaddingType& padding,
const OutputKernel& output_kernel,
int padding_top = 0, int padding_bottom = 0,
int padding_left = 0, int padding_right = 0) {
output.device(d) = Eigen::SpatialConvolution(
input, filter, col_stride, row_stride, padding, col_dilation,
row_dilation, output_kernel, padding_left, padding_right, padding_top,
padding_bottom);
}
template <typename Device, typename T,
typename OutputKernel = const Eigen::NoOpOutputKernel>
struct SpatialConvolution {
void operator()(const Device& d, typename TTypes<T, 4>::Tensor output,
typename TTypes<T, 4>::ConstTensor input,
typename TTypes<T, 4>::ConstTensor filter, int row_stride,
int col_stride, int row_dilation, int col_dilation,
const Eigen::PaddingType& padding,
const OutputKernel& output_kernel = OutputKernel()) {
SpatialConvolutionFunc(d, output, input, filter, row_stride, col_stride,
row_dilation, col_dilation, padding, output_kernel);
}
template <typename Input, typename Filter, typename Output>
void operator()(const Device& d, Output output, Input input, Filter filter,
int row_stride, int col_stride, int row_dilation,
int col_dilation, const Eigen::PaddingType& padding,
const OutputKernel& output_kernel = OutputKernel()) {
SpatialConvolutionFunc(d, output, input, filter, row_stride, col_stride,
row_dilation, col_dilation, padding, output_kernel);
}
void operator()(const Device& d, typename TTypes<T, 4>::Tensor output,
typename TTypes<T, 4>::ConstTensor input,
typename TTypes<T, 4>::ConstTensor filter, int row_stride,
int col_stride, int row_dilation, int col_dilation,
int padding_top, int padding_bottom, int padding_left,
int padding_right,
const OutputKernel& output_kernel = OutputKernel()) {
SpatialConvolutionFunc(
d, output, input, filter, row_stride, col_stride, row_dilation,
col_dilation, Eigen::PaddingType::PADDING_VALID, output_kernel,
padding_top, padding_bottom, padding_left, padding_right);
}
template <typename Input, typename Filter, typename Output>
void operator()(const Device& d, Output output, Input input, Filter filter,
int row_stride, int col_stride, int row_dilation,
int col_dilation, int padding_top, int padding_bottom,
int padding_left, int padding_right,
const OutputKernel& output_kernel = OutputKernel()) {
SpatialConvolutionFunc(
d, output, input, filter, row_stride, col_stride, row_dilation,
col_dilation, Eigen::PaddingType::PADDING_VALID, output_kernel,
padding_top, padding_bottom, padding_left, padding_right);
}
};
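// Specialization for Eigen::half: by default the inputs are cast to float so
// the convolution accumulates in fp32 and the result is cast back to half;
// setting TF_CONV2D_USE_FP16_ACCUMULATE=1 opts into fp16 accumulation.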
template <typename Device, typename OutputKernel>
struct SpatialConvolution<Device, Eigen::half, OutputKernel> {
void operator()(const Device& d,
typename TTypes<Eigen::half, 4>::Tensor output,
typename TTypes<Eigen::half, 4>::ConstTensor input,
typename TTypes<Eigen::half, 4>::ConstTensor filter,
int row_stride, int col_stride, int row_dilation,
int col_dilation, const Eigen::PaddingType& padding,
const OutputKernel& output_kernel = OutputKernel()) {
if (Conv2dUseFp16Accumulate()) {
output.device(d) = Eigen::SpatialConvolution(
input, filter, col_stride, row_stride, padding, col_dilation,
row_dilation, output_kernel);
} else {
output.device(d) =
Eigen::SpatialConvolution(input.cast<float>(), filter.cast<float>(),
col_stride, row_stride, padding,
col_dilation, row_dilation, output_kernel)
.template cast<Eigen::half>();
}
}
template <typename Input, typename Filter, typename Output>
void operator()(const Device& d, Output output, Input input, Filter filter,
int row_stride, int col_stride, int row_dilation,
int col_dilation, const Eigen::PaddingType& padding,
const OutputKernel& output_kernel = OutputKernel()) {
if (Conv2dUseFp16Accumulate()) {
output.device(d) = Eigen::SpatialConvolution(
input, filter, col_stride, row_stride, padding, col_dilation,
row_dilation, output_kernel);
} else {
output.device(d) =
Eigen::SpatialConvolution(input.template cast<float>(),
filter.template cast<float>(), col_stride,
row_stride, padding, col_dilation,
row_dilation, output_kernel)
.template cast<Eigen::half>();
}
}
void operator()(const Device& d,
typename TTypes<Eigen::half, 4>::Tensor output,
typename TTypes<Eigen::half, 4>::ConstTensor input,
typename TTypes<Eigen::half, 4>::ConstTensor filter,
int row_stride, int col_stride, int row_dilation,
int col_dilation, int padding_top, int padding_bottom,
int padding_left, int padding_right,
const OutputKernel& output_kernel = OutputKernel()) {
if (Conv2dUseFp16Accumulate()) {
output.device(d) = Eigen::SpatialConvolution(
input, filter, col_stride, row_stride,
Eigen::PaddingType::PADDING_VALID, col_dilation, row_dilation,
output_kernel, padding_left, padding_right, padding_top,
padding_bottom);
} else {
output.device(d) =
Eigen::SpatialConvolution(
input.cast<float>(), filter.cast<float>(), col_stride, row_stride,
Eigen::PaddingType::PADDING_VALID, col_dilation, row_dilation,
output_kernel, padding_left, padding_right, padding_top,
padding_bottom)
.template cast<Eigen::half>();
}
}
template <typename Input, typename Filter, typename Output>
void operator()(const Device& d, Output output, Input input, Filter filter,
int row_stride, int col_stride, int row_dilation,
int col_dilation, int padding_top, int padding_bottom,
int padding_left, int padding_right,
const OutputKernel& output_kernel = OutputKernel()) {
if (Conv2dUseFp16Accumulate()) {
output.device(d) = Eigen::SpatialConvolution(
input, filter, col_stride, row_stride,
Eigen::PaddingType::PADDING_VALID, col_dilation, row_dilation,
output_kernel, padding_left, padding_right, padding_top,
padding_bottom);
} else {
output.device(d) =
Eigen::SpatialConvolution(
input.template cast<float>(), filter.template cast<float>(),
col_stride, row_stride, Eigen::PaddingType::PADDING_VALID,
col_dilation, row_dilation, output_kernel, padding_left,
padding_right, padding_top, padding_bottom)
.template cast<Eigen::half>();
}
}
};
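// Specialization for Eigen::bfloat16: always casts the inputs to float,
// accumulates in fp32, and casts the result back to bfloat16.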
template <typename Device, typename OutputKernel>
struct SpatialConvolution<Device, Eigen::bfloat16, OutputKernel> {
void operator()(const Device& d,
typename TTypes<Eigen::bfloat16, 4>::Tensor output,
typename TTypes<Eigen::bfloat16, 4>::ConstTensor input,
typename TTypes<Eigen::bfloat16, 4>::ConstTensor filter,
int row_stride, int col_stride, int row_dilation,
int col_dilation, const Eigen::PaddingType& padding,
const OutputKernel& output_kernel = OutputKernel()) {
output.device(d) =
Eigen::SpatialConvolution(input.cast<float>(), filter.cast<float>(),
col_stride, row_stride, padding, col_dilation,
row_dilation, output_kernel)
.template cast<Eigen::bfloat16>();
}
template <typename Input, typename Filter, typename Output>
void operator()(const Device& d, Output output, Input input, Filter filter,
int row_stride, int col_stride, int row_dilation,
int col_dilation, const Eigen::PaddingType& padding,
const OutputKernel& output_kernel = OutputKernel()) {
output.device(d) =
Eigen::SpatialConvolution(input.template cast<float>(),
filter.template cast<float>(), col_stride,
row_stride, padding, col_dilation,
row_dilation, output_kernel)
.template cast<Eigen::bfloat16>();
}
void operator()(const Device& d,
typename TTypes<Eigen::bfloat16, 4>::Tensor output,
typename TTypes<Eigen::bfloat16, 4>::ConstTensor input,
typename TTypes<Eigen::bfloat16, 4>::ConstTensor filter,
int row_stride, int col_stride, int row_dilation,
int col_dilation, int padding_top, int padding_bottom,
int padding_left, int padding_right,
const OutputKernel& output_kernel = OutputKernel()) {
output.device(d) =
Eigen::SpatialConvolution(
input.cast<float>(), filter.cast<float>(), col_stride, row_stride,
Eigen::PaddingType::PADDING_VALID, col_dilation, row_dilation,
output_kernel, padding_left, padding_right, padding_top,
padding_bottom)
.template cast<Eigen::bfloat16>();
}
template <typename Input, typename Filter, typename Output>
void operator()(const Device& d, Output output, Input input, Filter filter,
int row_stride, int col_stride, int row_dilation,
int col_dilation, int padding_top, int padding_bottom,
int padding_left, int padding_right,
const OutputKernel& output_kernel = OutputKernel()) {
output.device(d) =
Eigen::SpatialConvolution(
input.template cast<float>(), filter.template cast<float>(),
col_stride, row_stride, Eigen::PaddingType::PADDING_VALID,
col_dilation, row_dilation, output_kernel, padding_left,
padding_right, padding_top, padding_bottom)
.template cast<Eigen::bfloat16>();
}
};
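// Backward-input (gradient w.r.t. the input) of the spatial convolution. The
// *WithExplicitPaddingFunc variants compute the gradient for the padded input
// and then slice out the region corresponding to the unpadded input.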
template <typename Device, typename T>
struct SpatialConvolutionBackwardInputFunc {
void operator()(const Device& d, typename TTypes<T, 4>::Tensor input_backward,
typename TTypes<T, 4>::ConstTensor filter,
typename TTypes<T, 4>::ConstTensor output_backward,
Eigen::DenseIndex col_stride, Eigen::DenseIndex row_stride,
Eigen::DenseIndex col_dilation,
Eigen::DenseIndex row_dilation) {
input_backward.device(d) = Eigen::SpatialConvolutionBackwardInput(
filter, output_backward, input_backward.dimension(2),
input_backward.dimension(1), col_stride, row_stride, col_dilation,
row_dilation);
}
};
template <typename T>
struct SpatialConvolutionBackwardInputFunc<Eigen::GpuDevice, T> {
void operator()(const Eigen::GpuDevice& d,
typename TTypes<T, 4>::Tensor input_backward,
typename TTypes<T, 4>::ConstTensor filter,
typename TTypes<T, 4>::ConstTensor output_backward,
Eigen::DenseIndex col_stride, Eigen::DenseIndex row_stride,
Eigen::DenseIndex col_dilation,
Eigen::DenseIndex row_dilation) {
To32Bit(input_backward).device(d) = Eigen::SpatialConvolutionBackwardInput(
To32Bit(filter), To32Bit(output_backward), input_backward.dimension(2),
input_backward.dimension(1), col_stride, row_stride, col_dilation,
row_dilation);
}
};
template <typename Device, typename T>
struct SpatialConvolutionBackwardInputWithExplicitPaddingFunc {
void operator()(const Device& d, typename TTypes<T, 4>::Tensor input_backward,
typename TTypes<T, 4>::ConstTensor filter,
typename TTypes<T, 4>::ConstTensor output_backward,
Eigen::DenseIndex padded_cols, Eigen::DenseIndex padded_rows,
Eigen::DenseIndex col_stride, Eigen::DenseIndex row_stride,
Eigen::DenseIndex col_dilation,
Eigen::DenseIndex row_dilation, Eigen::DenseIndex pad_left,
Eigen::DenseIndex pad_top) {
input_backward.device(d) =
Eigen::SpatialConvolutionBackwardInput(
filter, output_backward, padded_cols, padded_rows, col_stride,
row_stride, col_dilation, row_dilation)
.eval()
.slice(Eigen::DSizes<Eigen::DenseIndex, 4>{0, pad_left, pad_top, 0},
input_backward.dimensions());
}
};
template <typename T>
struct SpatialConvolutionBackwardInputWithExplicitPaddingFunc<Eigen::GpuDevice,
T> {
void operator()(const Eigen::GpuDevice& d,
typename TTypes<T, 4>::Tensor input_backward,
typename TTypes<T, 4>::ConstTensor filter,
typename TTypes<T, 4>::ConstTensor output_backward,
Eigen::DenseIndex padded_cols, Eigen::DenseIndex padded_rows,
Eigen::DenseIndex col_stride, Eigen::DenseIndex row_stride,
Eigen::DenseIndex col_dilation,
Eigen::DenseIndex row_dilation, Eigen::DenseIndex pad_left,
Eigen::DenseIndex pad_top) {
To32Bit(input_backward).device(d) =
Eigen::SpatialConvolutionBackwardInput(
To32Bit(filter), To32Bit(output_backward), padded_cols, padded_rows,
col_stride, row_stride, col_dilation, row_dilation)
.eval()
.slice(Eigen::DSizes<Eigen::DenseIndex, 4>{0, pad_left, pad_top, 0},
input_backward.dimensions());
}
};
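// Convolution expressed as a single rank-2 tensor contraction (a GEMM), e.g.
// for 1x1 filters. The Eigen::half specialization accumulates in fp32 unless
// TF_CONV2D_USE_FP16_ACCUMULATE=1; the bfloat16 specialization always
// accumulates in fp32.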
template <typename Device, typename T,
typename OutputKernel = const Eigen::NoOpOutputKernel>
struct MatMulConvFunctor {
void operator()(
const Device& d, typename TTypes<T, 2>::Tensor out,
typename TTypes<T, 2>::ConstTensor in0,
typename TTypes<T, 2>::ConstTensor in1,
const Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1>& dim_pair,
const OutputKernel& output_kernel = OutputKernel()) {
out.device(d) = in0.contract(in1, dim_pair, output_kernel);
}
};
template <typename Device, typename OutputKernel>
struct MatMulConvFunctor<Device, Eigen::half, OutputKernel> {
void operator()(
const Device& d, typename TTypes<Eigen::half, 2>::Tensor out,
typename TTypes<Eigen::half, 2>::ConstTensor in0,
typename TTypes<Eigen::half, 2>::ConstTensor in1,
const Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1>& dim_pair,
const OutputKernel& output_kernel = OutputKernel()) {
if (Conv2dUseFp16Accumulate()) {
out.device(d) = in0.contract(in1, dim_pair, output_kernel);
} else {
out.device(d) =
in0.cast<float>()
.contract(in1.template cast<float>(), dim_pair, output_kernel)
.template cast<Eigen::half>();
}
}
};
template <typename Device, typename OutputKernel>
struct MatMulConvFunctor<Device, Eigen::bfloat16, OutputKernel> {
void operator()(
const Device& d, typename TTypes<Eigen::bfloat16, 2>::Tensor out,
typename TTypes<Eigen::bfloat16, 2>::ConstTensor in0,
typename TTypes<Eigen::bfloat16, 2>::ConstTensor in1,
const Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1>& dim_pair,
const OutputKernel& output_kernel = OutputKernel()) {
out.device(d) = in0.cast<float>()
.contract(in1.cast<float>(), dim_pair, output_kernel)
.template cast<Eigen::bfloat16>();
}
};
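// Shuffles a filter from TensorFlow's HWIO layout (spatial dims, then input
// channels, then output channels) into OIHW or OHWI as requested by
// dst_filter_format.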
template <typename Device, typename T, typename IndexType, int NDIMS>
struct TransformFilter {
void operator()(const Device& d, FilterTensorFormat dst_filter_format,
typename TTypes<T, NDIMS, IndexType>::ConstTensor in,
typename TTypes<T, NDIMS, IndexType>::Tensor out) {
Eigen::DSizes<IndexType, NDIMS - 2> spatial_dims;
for (int i = 0; i < spatial_dims.rank(); ++i) {
spatial_dims[i] = in.dimension(i);
}
Eigen::DSizes<IndexType, 3> merged_dims;
merged_dims[0] = spatial_dims.TotalSize();
merged_dims[1] = in.dimension(NDIMS - 2);
merged_dims[2] = in.dimension(NDIMS - 1);
Eigen::DSizes<IndexType, 3> shuffling_perm;
Eigen::DSizes<IndexType, NDIMS> expanded_dims;
if (dst_filter_format == FORMAT_OIHW) {
shuffling_perm = Eigen::DSizes<IndexType, 3>(2, 1, 0);
expanded_dims[0] = merged_dims[2];
expanded_dims[1] = merged_dims[1];
for (int i = 0; i < spatial_dims.rank(); ++i) {
expanded_dims[2 + i] = spatial_dims[i];
}
} else if (dst_filter_format == FORMAT_OHWI) {
shuffling_perm = Eigen::DSizes<IndexType, 3>(2, 0, 1);
expanded_dims[0] = merged_dims[2];
expanded_dims[NDIMS - 1] = merged_dims[1];
for (int i = 0; i < spatial_dims.rank(); ++i) {
expanded_dims[1 + i] = spatial_dims[i];
}
} else {
DCHECK(false) << "Unsupported destination filter format: "
<< ToString(dst_filter_format);
}
out.device(d) =
in.reshape(merged_dims).shuffle(shuffling_perm).reshape(expanded_dims);
}
};
template <typename Device, typename T, typename IndexType>
struct TransformDepth {
void operator()(const Device& d,
typename TTypes<T, 4, IndexType>::ConstTensor in,
const Eigen::DSizes<IndexType, 4>& shuffle,
typename TTypes<T, 4, IndexType>::Tensor out) {
Eigen::DSizes<IndexType, 3> merged_dims;
Eigen::DSizes<IndexType, 4> expanded_dims;
Eigen::DSizes<IndexType, 3> new_shuffle;
if (shuffle[1] == 2 && shuffle[2] == 3) {
merged_dims[0] = in.dimension(0);
merged_dims[1] = in.dimension(1);
merged_dims[2] = in.dimension(2) * in.dimension(3);
new_shuffle[0] = shuffle[0];
new_shuffle[1] = 2;
new_shuffle[2] = shuffle[3];
expanded_dims[0] = in.dimension(shuffle[0]);
expanded_dims[1] = in.dimension(2);
expanded_dims[2] = in.dimension(3);
expanded_dims[3] = in.dimension(shuffle[3]);
} else if (shuffle[0] == 2 && shuffle[1] == 3) {
merged_dims[0] = in.dimension(0);
merged_dims[1] = in.dimension(1);
merged_dims[2] = in.dimension(2) * in.dimension(3);
new_shuffle[0] = 2;
new_shuffle[1] = shuffle[2];
new_shuffle[2] = shuffle[3];
expanded_dims[0] = in.dimension(2);
expanded_dims[1] = in.dimension(3);
expanded_dims[2] = in.dimension(shuffle[2]);
expanded_dims[3] = in.dimension(shuffle[3]);
} else if (shuffle[0] == 0 && shuffle[1] == 3 && shuffle[2] == 1 &&
shuffle[3] == 2) {
merged_dims[0] = in.dimension(0);
merged_dims[1] = in.dimension(1) * in.dimension(2);
merged_dims[2] = in.dimension(3);
new_shuffle[0] = 0;
new_shuffle[1] = 2;
new_shuffle[2] = 1;
expanded_dims[0] = in.dimension(0);
expanded_dims[1] = in.dimension(3);
expanded_dims[2] = in.dimension(1);
expanded_dims[3] = in.dimension(2);
} else {
assert(false && "unexpected shuffle");
}
out.device(d) =
in.reshape(merged_dims).shuffle(new_shuffle).reshape(expanded_dims);
}
};
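// Pads the spatial dimensions of the input with padding_value; the batch and
// channel dimensions are left unpadded.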
template <typename Device, typename T, typename IndexType, int NDIMS>
struct PadInput {
void operator()(const Device& d,
typename TTypes<T, NDIMS, IndexType>::ConstTensor in,
const std::array<int, NDIMS - 2>& padding_left,
const std::array<int, NDIMS - 2>& padding_right,
typename TTypes<T, NDIMS, IndexType>::Tensor out,
TensorFormat format, const T& padding_value) {
Eigen::array<Eigen::IndexPair<IndexType>, NDIMS> padding;
padding[GetTensorDimIndex<NDIMS - 2>(format, 'N')] = {0, 0};
for (int i = 0; i < NDIMS - 2; ++i) {
padding[GetTensorDimIndex<NDIMS - 2>(format, '0' + i)] = {
padding_left[i], padding_right[i]};
}
padding[GetTensorDimIndex<NDIMS - 2>(format, 'C')] = {0, 0};
out.device(d) = in.pad(padding, padding_value);
}
};
template <typename Device, typename T, int NDIMS>
struct NHWCToNCHW {
void operator()(const Device& d, typename TTypes<T, NDIMS>::ConstTensor in,
typename TTypes<T, NDIMS>::Tensor out);
};
template <typename Device, typename T, int NDIMS>
struct NCHWToNHWC {
void operator()(const Device& d, typename TTypes<T, NDIMS>::ConstTensor in,
typename TTypes<T, NDIMS>::Tensor out);
};
template <typename Device, typename T, bool conjugate = false>
struct SwapDimension1And2InTensor3 {
void operator()(const Device& d, const T* in,
const absl::Span<const int64_t>& input_dims, T* out);
};
template <typename Device, typename T, bool conjugate = false>
struct SwapDimension0And2InTensor3 {
void operator()(const Device& d, const T* in,
const absl::Span<const int64_t>& input_dims, T* out);
};
template <typename Device, typename T, int NDIMS>
struct ReverseTransformFilter {
void operator()(const Device& d, FilterTensorFormat src_filter_format,
typename TTypes<T, NDIMS>::ConstTensor in,
typename TTypes<T, NDIMS>::Tensor out);
};
}
template <class T>
class ConvAlgorithmMap;
template <>
class ConvAlgorithmMap<Eigen::ThreadPoolDevice> {};
}
#endif | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/conv_2d_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
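// Each test below builds a Conv2D model with randomized shapes via
// Conv2DTester and runs it with the XNNPACK delegate; the tester is expected
// to compare the delegated output against the default TFLite kernels.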
TEST(Conv2D, 1x1) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(1)
.KernelWidth(1)
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, 3x3) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(3)
.KernelWidth(3)
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, 3x3Stride2) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(3)
.KernelWidth(3)
.StrideHeight(2)
.StrideWidth(2)
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, Grouped) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto channel_per_group_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
auto groups_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 8), std::ref(rng));
auto groups = groups_rng();
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(groups * channel_per_group_rng())
.OutputChannels(groups * channel_per_group_rng())
.Groups(groups)
.KernelHeight(3)
.KernelWidth(3)
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, SmallKernelWithSamePadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 7), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, SmallKernelWithValidPadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 7), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, StrideWithSamePadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, StrideWithValidPadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, DilationWithSamePadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto dilation_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.DilationHeight(dilation_rng())
.DilationWidth(dilation_rng())
.SamePadding()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, DilationWithValidPadding) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto dilation_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.DilationHeight(dilation_rng())
.DilationWidth(dilation_rng())
.ValidPadding()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.FP16Weights()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, TensorWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.TensorWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, ChannelWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.ChannelWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SparseWeights()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, SparseFP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SparseWeights()
.FP16Weights()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, SparseTensorWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SparseWeights()
.TensorWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, SparseChannelWiseQuantizedInt8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SparseWeights()
.ChannelWiseQuantizedInt8Weights()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, ReluActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.ReluActivation()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, Relu6Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.Relu6Activation()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, ReluMinus1To1Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.ReluMinus1To1Activation()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, DISABLED_TanhActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.TanhActivation()
.Test(xnnpack_delegate.get());
}
TEST(Conv2D, DISABLED_SignBitActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.SignBitActivation()
.Test(xnnpack_delegate.get());
}
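// Same convolution pattern, but with the delegate configured to use two
// worker threads.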
TEST(Conv2D, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.Test(xnnpack_delegate.get());
}
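// Attaches an XNNPACK weights cache to the delegate so packed weights can be
// reused across delegate instances that share the cache.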
TEST(Conv2D, WeightsCache) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
std::unique_ptr<TfLiteXNNPackDelegateWeightsCache,
decltype(&TfLiteXNNPackDelegateWeightsCacheDelete)>
weights_cache(TfLiteXNNPackDelegateWeightsCacheCreate(),
TfLiteXNNPackDelegateWeightsCacheDelete);
delegate_options.weights_cache = weights_cache.get();
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(10, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.WeightsCache(weights_cache.get())
.Test(xnnpack_delegate.get());
}
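// Sets TFLITE_XNNPACK_DELEGATE_FLAG_TRANSIENT_INDIRECTION_BUFFER so that
// indirection buffers are placed in transient (scratch) memory instead of
// being kept persistent.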
TEST(Conv2D, TransientIndirectionBuffer) {
TfLiteXNNPackDelegateOptions xnnpack_options =
TfLiteXNNPackDelegateOptionsDefault();
xnnpack_options.num_threads = 2;
xnnpack_options.flags |=
TFLITE_XNNPACK_DELEGATE_FLAG_TRANSIENT_INDIRECTION_BUFFER;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&xnnpack_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto input_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto kernel_rng =
std::bind(std::uniform_int_distribution<int32_t>(3, 5), std::ref(rng));
auto stride_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
Conv2DTester()
.BatchSize(batch_rng())
.InputHeight(input_rng())
.InputWidth(input_rng())
.InputChannels(channel_rng())
.OutputChannels(channel_rng())
.KernelHeight(kernel_rng())
.KernelWidth(kernel_rng())
.StrideHeight(stride_rng())
.StrideWidth(stride_rng())
.Test(xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/conv_2d.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/conv_2d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0e0bd9f-6772-4b67-a6fa-86f8287c9d5b | cpp | tensorflow/tensorflow | loss | tensorflow/core/kernels/loss.h | tensorflow/core/kernels/loss_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_LOSS_H_
#define TENSORFLOW_CORE_KERNELS_LOSS_H_
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
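// Interface for the loss functions used by the SDCA (stochastic dual
// coordinate ascent) optimizer kernels. Implementations supply the primal and
// dual loss values, the derivative of the primal loss, the per-example update
// of the dual variable, and validation/conversion of example labels into the
// form the loss expects.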
class DualLossUpdater {
public:
virtual ~DualLossUpdater() {}
virtual double ComputeUpdatedDual(
const int num_loss_partitions, const double label,
const double example_weight, const double current_dual, const double wx,
const double weighted_example_norm) const = 0;
virtual double ComputeDualLoss(const double current_dual,
const double example_label,
const double example_weight) const = 0;
virtual double ComputePrimalLoss(const double wx, const double example_label,
const double example_weight) const = 0;
virtual double PrimalLossDerivative(const double wx,
const double example_label,
const double example_weight) const = 0;
virtual double SmoothnessConstant() const = 0;
virtual Status ConvertLabel(float* const example_label) const = 0;
};
}
#endif | #include <limits>
#include "tensorflow/core/kernels/hinge-loss.h"
#include "tensorflow/core/kernels/logistic-loss.h"
#include "tensorflow/core/kernels/poisson-loss.h"
#include "tensorflow/core/kernels/smooth-hinge-loss.h"
#include "tensorflow/core/kernels/squared-loss.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
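// Verifies that ComputeUpdatedDual satisfies the optimality condition: after
// the update, the new dual equals the negative derivative of the primal loss
// evaluated at the updated prediction wx.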
void TestComputeUpdatedDual(const DualLossUpdater &loss_updater,
const int num_loss_partitions, const double label,
const double example_weight,
const double current_dual, const double wx,
const double weighted_example_norm) {
double new_dual = loss_updater.ComputeUpdatedDual(
num_loss_partitions, label, example_weight, current_dual, wx,
weighted_example_norm);
double new_wx = wx + (new_dual - current_dual) * num_loss_partitions *
weighted_example_norm * example_weight;
EXPECT_NEAR(new_dual, -loss_updater.PrimalLossDerivative(new_wx, label, 1.0),
1e-5);
}
TEST(LogisticLoss, ComputePrimalLoss) {
  LogisticLossUpdater loss_updater;
  EXPECT_NEAR(0.693147,
              loss_updater.ComputePrimalLoss(/*wx=*/0, /*example_label=*/1,
                                             /*example_weight=*/1),
              1e-3);
  EXPECT_NEAR(0.0,
              loss_updater.ComputePrimalLoss(/*wx=*/70, /*example_label=*/1,
                                             /*example_weight=*/1),
              1e-3);
  EXPECT_NEAR(0.0,
              loss_updater.ComputePrimalLoss(/*wx=*/-70, /*example_label=*/-1,
                                             /*example_weight=*/1),
              1e-3);
}
TEST(LogisticLoss, ComputeDualLoss) {
  LogisticLossUpdater loss_updater;
  EXPECT_NEAR(0.0,
              loss_updater.ComputeDualLoss(/*current_dual=*/0,
                                           /*example_label=*/1,
                                           /*example_weight=*/1),
              1e-3);
  EXPECT_NEAR(0.0,
              loss_updater.ComputeDualLoss(/*current_dual=*/1,
                                           /*example_label=*/1,
                                           /*example_weight=*/1),
              1e-3);
  EXPECT_NEAR(-0.693147,
              loss_updater.ComputeDualLoss(/*current_dual=*/0.5,
                                           /*example_label=*/1,
                                           /*example_weight=*/1),
              1e-3);
}
TEST(LogisticLoss, ComputeUpdatedDual) {
  LogisticLossUpdater loss_updater;
  TestComputeUpdatedDual(loss_updater, /*num_loss_partitions=*/1,
                         /*label=*/1.0, /*example_weight=*/1.0,
                         /*current_dual=*/0.5, /*wx=*/0.3,
                         /*weighted_example_norm=*/10.0);
  TestComputeUpdatedDual(loss_updater, /*num_loss_partitions=*/2,
                         /*label=*/-1.0, /*example_weight=*/1.0,
                         /*current_dual=*/0.1, /*wx=*/-0.8,
                         /*weighted_example_norm=*/10.0);
}
TEST(SquaredLoss, ComputePrimalLoss) {
  SquaredLossUpdater loss_updater;
  EXPECT_NEAR(0.5,
              loss_updater.ComputePrimalLoss(/*wx=*/0.0, /*example_label=*/1.0,
                                             /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(40.5,
              loss_updater.ComputePrimalLoss(/*wx=*/10.0, /*example_label=*/1.0,
                                             /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(0.125,
              loss_updater.ComputePrimalLoss(/*wx=*/-0.5, /*example_label=*/-1.0,
                                             /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(4.84,
              loss_updater.ComputePrimalLoss(/*wx=*/1.2, /*example_label=*/-1.0,
                                             /*example_weight=*/2.0),
              1e-3);
}
TEST(SquaredLoss, ComputeDualLoss) {
  SquaredLossUpdater loss_updater;
  EXPECT_NEAR(0.0,
              loss_updater.ComputeDualLoss(/*current_dual=*/0.0,
                                           /*example_label=*/-1.0,
                                           /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(0.66,
              loss_updater.ComputeDualLoss(/*current_dual=*/0.2,
                                           /*example_label=*/-1.0,
                                           /*example_weight=*/3.0),
              1e-3);
  EXPECT_NEAR(-0.375,
              loss_updater.ComputeDualLoss(/*current_dual=*/1.5,
                                           /*example_label=*/1.0,
                                           /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(-1.125,
              loss_updater.ComputeDualLoss(/*current_dual=*/0.5,
                                           /*example_label=*/1.0,
                                           /*example_weight=*/3.0),
              1e-3);
}
TEST(SquaredLoss, ComputeUpdatedDual) {
  SquaredLossUpdater loss_updater;
  TestComputeUpdatedDual(loss_updater, /*num_loss_partitions=*/1,
                         /*label=*/1.0, /*example_weight=*/1.0,
                         /*current_dual=*/0.3, /*wx=*/0.3,
                         /*weighted_example_norm=*/10.0);
  TestComputeUpdatedDual(loss_updater, /*num_loss_partitions=*/5,
                         /*label=*/-1.0, /*example_weight=*/1.0,
                         /*current_dual=*/-0.4, /*wx=*/0.8,
                         /*weighted_example_norm=*/10.0);
}
TEST(HingeLoss, ComputePrimalLoss) {
  HingeLossUpdater loss_updater;
  EXPECT_NEAR(1.0,
              loss_updater.ComputePrimalLoss(/*wx=*/0.0, /*example_label=*/1.0,
                                             /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(0.0,
              loss_updater.ComputePrimalLoss(/*wx=*/10.0, /*example_label=*/1.0,
                                             /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(0.5,
              loss_updater.ComputePrimalLoss(/*wx=*/-0.5, /*example_label=*/-1.0,
                                             /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(4.4,
              loss_updater.ComputePrimalLoss(/*wx=*/1.2, /*example_label=*/-1.0,
                                             /*example_weight=*/2.0),
              1e-3);
}
TEST(HingeLoss, ComputeDualLoss) {
  HingeLossUpdater loss_updater;
  EXPECT_NEAR(0.0,
              loss_updater.ComputeDualLoss(/*current_dual=*/0.0,
                                           /*example_label=*/-1.0,
                                           /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(std::numeric_limits<double>::max(),
              loss_updater.ComputeDualLoss(/*current_dual=*/0.2,
                                           /*example_label=*/-1.0,
                                           /*example_weight=*/3.0),
              1e-3);
  EXPECT_NEAR(std::numeric_limits<double>::max(),
              loss_updater.ComputeDualLoss(/*current_dual=*/1.5,
                                           /*example_label=*/1.0,
                                           /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(-1.5,
              loss_updater.ComputeDualLoss(/*current_dual=*/0.5,
                                           /*example_label=*/1.0,
                                           /*example_weight=*/3.0),
              1e-3);
}
TEST(HingeLoss, ConvertLabel) {
HingeLossUpdater loss_updater;
float example_label = 1.0;
Status status;
TF_EXPECT_OK(loss_updater.ConvertLabel(&example_label));
EXPECT_EQ(1.0, example_label);
example_label = 0.0;
TF_EXPECT_OK(loss_updater.ConvertLabel(&example_label));
EXPECT_EQ(-1.0, example_label);
example_label = 0.5;
status = loss_updater.ConvertLabel(&example_label);
EXPECT_FALSE(status.ok());
}
TEST(HingeLoss, ComputeUpdatedDual) {
  HingeLossUpdater loss_updater;
  EXPECT_NEAR(0.507,
              loss_updater.ComputeUpdatedDual(
                  /*num_loss_partitions=*/1, /*label=*/1.0,
                  /*example_weight=*/1.0, /*current_dual=*/0.5,
                  /*wx=*/0.3, /*weighted_example_norm=*/100.0),
              1e-3);
  EXPECT_NEAR(-0.416,
              loss_updater.ComputeUpdatedDual(
                  /*num_loss_partitions=*/10, /*label=*/-1.0,
                  /*example_weight=*/1.0, /*current_dual=*/-0.4,
                  /*wx=*/0.6, /*weighted_example_norm=*/10.0),
              1e-3);
  TestComputeUpdatedDual(loss_updater, /*num_loss_partitions=*/1,
                         /*label=*/1.0, /*example_weight=*/1.0,
                         /*current_dual=*/-0.5, /*wx=*/0.3,
                         /*weighted_example_norm=*/10.0);
  TestComputeUpdatedDual(loss_updater, /*num_loss_partitions=*/1,
                         /*label=*/-1.0, /*example_weight=*/2.0,
                         /*current_dual=*/-1.0, /*wx=*/0.3,
                         /*weighted_example_norm=*/10.0);
}
TEST(SmoothHingeLoss, ComputePrimalLoss) {
  SmoothHingeLossUpdater loss_updater;
  EXPECT_NEAR(0.5,
              loss_updater.ComputePrimalLoss(/*wx=*/0.0, /*example_label=*/1.0,
                                             /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(0.0,
              loss_updater.ComputePrimalLoss(/*wx=*/10.0, /*example_label=*/1.0,
                                             /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(0.125,
              loss_updater.ComputePrimalLoss(/*wx=*/-0.5, /*example_label=*/-1.0,
                                             /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(3.4,
              loss_updater.ComputePrimalLoss(/*wx=*/1.2, /*example_label=*/-1.0,
                                             /*example_weight=*/2.0),
              1e-3);
}
TEST(SmoothHingeLoss, ComputeDualLoss) {
  SmoothHingeLossUpdater loss_updater;
  EXPECT_NEAR(0.0,
              loss_updater.ComputeDualLoss(/*current_dual=*/0.0,
                                           /*example_label=*/-1.0,
                                           /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(std::numeric_limits<double>::max(),
              loss_updater.ComputeDualLoss(/*current_dual=*/0.2,
                                           /*example_label=*/-1.0,
                                           /*example_weight=*/3.0),
              1e-3);
  EXPECT_NEAR(std::numeric_limits<double>::max(),
              loss_updater.ComputeDualLoss(/*current_dual=*/1.5,
                                           /*example_label=*/1.0,
                                           /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(-1.125,
              loss_updater.ComputeDualLoss(/*current_dual=*/0.5,
                                           /*example_label=*/1.0,
                                           /*example_weight=*/3.0),
              1e-3);
}
TEST(SmoothHingeLoss, ComputeUpdatedDual) {
  SmoothHingeLossUpdater loss_updater;
  TestComputeUpdatedDual(loss_updater, /*num_loss_partitions=*/1,
                         /*label=*/1.0, /*example_weight=*/1.0,
                         /*current_dual=*/0.3, /*wx=*/0.3,
                         /*weighted_example_norm=*/10.0);
  TestComputeUpdatedDual(loss_updater, /*num_loss_partitions=*/5,
                         /*label=*/-1.0, /*example_weight=*/1.0,
                         /*current_dual=*/-0.4, /*wx=*/0.8,
                         /*weighted_example_norm=*/10.0);
}
TEST(PoissonLoss, ComputePrimalLoss) {
  PoissonLossUpdater loss_updater;
  EXPECT_NEAR(1.0,
              loss_updater.ComputePrimalLoss(/*wx=*/0.0, /*example_label=*/3.0,
                                             /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(21996.0,
              loss_updater.ComputePrimalLoss(/*wx=*/10.0, /*example_label=*/3.0,
                                             /*example_weight=*/1.0),
              1.0);
  EXPECT_NEAR(0.606,
              loss_updater.ComputePrimalLoss(/*wx=*/-0.5, /*example_label=*/0.0,
                                             /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(6.64,
              loss_updater.ComputePrimalLoss(/*wx=*/1.2, /*example_label=*/0.0,
                                             /*example_weight=*/2.0),
              1e-2);
}
TEST(PoissonLoss, ComputeDualLoss) {
  PoissonLossUpdater loss_updater;
  EXPECT_NEAR(std::numeric_limits<double>::max(),
              loss_updater.ComputeDualLoss(/*current_dual=*/1.0,
                                           /*example_label=*/0.0,
                                           /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(0.0,
              loss_updater.ComputeDualLoss(/*current_dual=*/0.0,
                                           /*example_label=*/0.0,
                                           /*example_weight=*/3.0),
              1e-3);
  EXPECT_NEAR(-0.847,
              loss_updater.ComputeDualLoss(/*current_dual=*/1.5,
                                           /*example_label=*/2.0,
                                           /*example_weight=*/1.0),
              1e-3);
  EXPECT_NEAR(-2.675,
              loss_updater.ComputeDualLoss(/*current_dual=*/0.5,
                                           /*example_label=*/2.0,
                                           /*example_weight=*/3.0),
              1e-3);
}
TEST(PoissonLoss, ConvertLabel) {
PoissonLossUpdater loss_updater;
float example_label = -1.0;
Status status = loss_updater.ConvertLabel(&example_label);
EXPECT_FALSE(status.ok());
}
TEST(PoissonLoss, ComputeUpdatedDual) {
  PoissonLossUpdater loss_updater;
  TestComputeUpdatedDual(loss_updater, /*num_loss_partitions=*/1,
                         /*label=*/2.0, /*example_weight=*/1.0,
                         /*current_dual=*/0.5, /*wx=*/0.3,
                         /*weighted_example_norm=*/10.0);
  TestComputeUpdatedDual(loss_updater, /*num_loss_partitions=*/2,
                         /*label=*/0.0, /*example_weight=*/1.0,
                         /*current_dual=*/0.0, /*wx=*/-0.8,
                         /*weighted_example_norm=*/10.0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/loss.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/loss_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7f13d784-1dd3-4570-9e98-e4867d47a9bd | cpp | tensorflow/tensorflow | shared_batch_scheduler | tensorflow/core/kernels/batching_util/shared_batch_scheduler.h | tensorflow/core/kernels/batching_util/shared_batch_scheduler_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_SHARED_BATCH_SCHEDULER_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_SHARED_BATCH_SCHEDULER_H_
#include <stddef.h>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
#include <list>
#include <memory>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/time/clock.h"
#include "tensorflow/core/kernels/batching_util/batch_input_task.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h"
#include "tensorflow/core/kernels/batching_util/batch_stats.h"
#include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
#include "tensorflow/core/profiler/lib/context_types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tsl/platform/criticality.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace serving {
namespace internal {
template <typename TaskType>
class Queue;
}
}
}
namespace tensorflow {
namespace serving {
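// A batch scheduler in which a single pool of `num_batch_threads` threads is
// shared across multiple queues (e.g. one queue per model or signature).
// Tasks accumulate into per-queue batches; the shared threads visit the
// queues in round-robin order, pick up batches that are full, timed out or
// closed, and hand them to the queue's process-batch callback.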
template <typename TaskType>
class SharedBatchScheduler
: public std::enable_shared_from_this<SharedBatchScheduler<TaskType>> {
public:
using BatchTaskUniquePtr = std::unique_ptr<Batch<TaskType>>;
using ProcessBatchCallback =
std::variant<std::function<void(BatchTaskUniquePtr)>,
std::function<void(BatchTaskUniquePtr,
std::vector<std::unique_ptr<TaskType>>)>>;
struct Options {
string thread_pool_name = {"batch_threads"};
int num_batch_threads = port::MaxParallelism();
Env* env = Env::Default();
};
static Status Create(
const Options& options,
std::shared_ptr<SharedBatchScheduler<TaskType>>* scheduler);
virtual ~SharedBatchScheduler();
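  // Per-queue configuration: `input_batch_size_limit` bounds the size of an
  // individual task, `batch_timeout_micros` bounds how long a non-full batch
  // may wait before being scheduled, `max_enqueued_batches` bounds the queue
  // depth, and the remaining fields control large-batch splitting, padding
  // policy, and the optional low-priority (priority-queue) path.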
struct QueueOptions {
size_t input_batch_size_limit = 1000;
int64_t batch_timeout_micros = 0;
size_t max_enqueued_batches = 10;
bool enable_large_batch_splitting = false;
std::function<Status(std::unique_ptr<TaskType>* input_task,
int first_output_task_size, int input_batch_size_limit,
std::vector<std::unique_ptr<TaskType>>* output_tasks)>
split_input_task_func;
size_t max_execution_batch_size = 1000;
std::vector<int32> allowed_batch_sizes;
bool disable_padding = false;
string batch_padding_policy = string(kPadUpPolicy);
ModelBatchStats* model_batch_stats = nullptr;
bool enable_priority_queue = false;
struct PriorityQueueOptions {
size_t max_execution_batch_size = 0;
int64_t batch_timeout_micros = 0;
size_t input_batch_size_limit = 0;
size_t max_enqueued_batches = 0;
std::vector<int32> allowed_batch_sizes;
};
PriorityQueueOptions high_priority_queue_options;
PriorityQueueOptions low_priority_queue_options;
MixedPriorityBatchingPolicy mixed_priority_batching_policy =
MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize;
};
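  // Adds a queue governed by `options`. Batches formed from tasks submitted
  // to the returned queue are passed to `process_batch_callback` on one of
  // the shared batch threads. Destroying the returned handle closes the
  // queue and blocks until its pending work has been processed.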
virtual Status AddQueue(const QueueOptions& options,
ProcessBatchCallback process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue);
protected:
explicit SharedBatchScheduler(const Options& options);
private:
void GetNextWorkItem_Locked(internal::Queue<TaskType>** queue_for_batch_out,
BatchTaskUniquePtr* batch_to_process_out)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void ThreadLogic();
Status AddQueueAfterRewritingOptions(
const QueueOptions& options, ProcessBatchCallback process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue);
static bool BatchExists(const BatchTaskUniquePtr& batch_to_process);
const Options options_;
mutex mu_;
using QueueList = std::list<std::unique_ptr<internal::Queue<TaskType>>>;
QueueList queues_ TF_GUARDED_BY(mu_);
typename QueueList::iterator next_queue_to_schedule_ TF_GUARDED_BY(mu_);
condition_variable schedulable_batch_cv_;
std::vector<std::unique_ptr<PeriodicFunction>> batch_threads_;
SharedBatchScheduler(const SharedBatchScheduler&) = delete;
void operator=(const SharedBatchScheduler&) = delete;
};
namespace internal {
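// A single queue managed by SharedBatchScheduler. It owns the open batch
// currently being filled, any closed batches awaiting processing, and
// (optionally) a queue of low-priority tasks used to form or pad batches.
// All public methods are thread-safe.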
template <typename TaskType>
class Queue {
public:
using ProcessBatchCallbackWithoutPaddingTasks =
std::function<void(std::unique_ptr<Batch<TaskType>>)>;
using ProcessBatchCallbackWithPaddingTasks =
std::function<void(std::unique_ptr<Batch<TaskType>>,
std::vector<std::unique_ptr<TaskType>>)>;
using ProcessBatchCallback =
std::variant<ProcessBatchCallbackWithoutPaddingTasks,
ProcessBatchCallbackWithPaddingTasks>;
using SchedulableBatchCallback = std::function<void()>;
using SplitInputTaskIntoSubtasksCallback = std::function<Status(
std::unique_ptr<TaskType>* input_task, int open_batch_remaining_slot,
int max_execution_batch_size,
std::vector<std::unique_ptr<TaskType>>* output_tasks)>;
Queue(const typename SharedBatchScheduler<TaskType>::QueueOptions& options,
Env* env, ProcessBatchCallback process_batch_callback,
SchedulableBatchCallback schedulable_batch_callback);
~Queue();
Status Schedule(std::unique_ptr<TaskType>* task);
size_t NumEnqueuedTasks() const;
size_t SchedulingCapacity() const;
size_t max_task_size() const { return options_.input_batch_size_limit; }
size_t max_execution_batch_size() const { return max_execution_batch_size_; }
typename SharedBatchScheduler<TaskType>::BatchTaskUniquePtr ScheduleBatch();
std::vector<std::unique_ptr<TaskType>> GetLowPriorityTasksForPadding(
size_t batch_size);
void ProcessBatch(std::unique_ptr<Batch<TaskType>> batch,
std::vector<std::unique_ptr<TaskType>> padding_task);
bool IsEmpty() const;
void CloseAndWaitUntilEmpty();
bool closed() const TF_NO_THREAD_SAFETY_ANALYSIS { return closed_.load(); }
private:
static size_t GetMaxExecutionBatchSize(
const typename SharedBatchScheduler<TaskType>::QueueOptions& options) {
if (options.enable_large_batch_splitting) {
return options.max_execution_batch_size;
} else {
return options.input_batch_size_limit;
}
}
bool IsEmptyInternal() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
bool IsLowPriorityTask(std::unique_ptr<TaskType>* task);
Status ScheduleWithoutOrEagerSplitImpl(std::unique_ptr<TaskType>* task)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void StartNewBatch() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status SplitInputBatchIntoSubtasks(
std::unique_ptr<TaskType>* input_task,
std::vector<std::unique_ptr<TaskType>>* output_tasks)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
bool IsOpenBatchSchedulable() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
std::unique_ptr<Batch<TaskType>> ScheduleLowPriorityBatch()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
size_t SchedulingCapacityInternal() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status ValidateBatchTaskQueueCapacity(TaskType* task) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status ValidateLowPriorityTaskQueueCapacity(const TaskType& task) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
size_t tail_batch_task_size() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
int64 num_enqueued_batches() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
std::deque<std::unique_ptr<Batch<TaskType>>>& GetBatches()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
const std::deque<std::unique_ptr<Batch<TaskType>>>& GetBatches() const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
TaskQueue<TaskType>& GetLowPriorityTaskQueue()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
std::vector<std::unique_ptr<TaskType>> GetLowPriorityTasks(size_t size);
const typename SharedBatchScheduler<TaskType>::QueueOptions options_;
Env* env_;
const size_t max_execution_batch_size_;
ProcessBatchCallback process_batch_callback_;
SchedulableBatchCallback schedulable_batch_callback_;
mutable mutex mu_;
std::atomic<bool> closed_ TF_GUARDED_BY(mu_){false};
TaskQueue<TaskType> low_priority_tasks_ TF_GUARDED_BY(mu_);
std::deque<std::unique_ptr<Batch<TaskType>>> low_priority_batches_
TF_GUARDED_BY(mu_);
std::deque<std::unique_ptr<Batch<TaskType>>> high_priority_batches_
TF_GUARDED_BY(mu_);
uint64 traceme_context_id_counter_ TF_GUARDED_BY(mu_) = 0;
uint64 open_batch_start_time_micros_ TF_GUARDED_BY(mu_);
bool schedulable_batch_ TF_GUARDED_BY(mu_) = false;
int num_batches_being_processed_ TF_GUARDED_BY(mu_) = 0;
Notification* empty_notification_ TF_GUARDED_BY(mu_) = nullptr;
Queue(const Queue&) = delete;
void operator=(const Queue&) = delete;
};
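// The BatchScheduler implementation returned to clients by AddQueue(). It
// keeps the parent SharedBatchScheduler alive through a shared_ptr, forwards
// scheduling and capacity queries to the underlying internal::Queue, and on
// destruction closes the queue and waits for it to drain.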
template <typename TaskType>
class QueueHandle : public BatchScheduler<TaskType> {
public:
QueueHandle(std::shared_ptr<SharedBatchScheduler<TaskType>> scheduler,
Queue<TaskType>* queue);
~QueueHandle() override;
Status Schedule(std::unique_ptr<TaskType>* task) override;
size_t NumEnqueuedTasks() const override;
size_t SchedulingCapacity() const override;
size_t max_task_size() const override { return queue_->max_task_size(); }
private:
std::shared_ptr<SharedBatchScheduler<TaskType>> scheduler_;
Queue<TaskType>* queue_;
QueueHandle(const QueueHandle&) = delete;
void operator=(const QueueHandle&) = delete;
};
}
template <typename TaskType>
Status SharedBatchScheduler<TaskType>::Create(
const Options& options,
std::shared_ptr<SharedBatchScheduler<TaskType>>* scheduler) {
if (options.num_batch_threads < 1) {
return errors::InvalidArgument("num_batch_threads must be positive; was ",
options.num_batch_threads);
}
scheduler->reset(new SharedBatchScheduler<TaskType>(options));
return absl::OkStatus();
}
template <typename TaskType>
SharedBatchScheduler<TaskType>::~SharedBatchScheduler() {
for (;;) {
{
mutex_lock l(mu_);
if (queues_.empty()) {
break;
}
}
const int64_t kSleepTimeMicros = 100;
options_.env->SleepForMicroseconds(kSleepTimeMicros);
}
batch_threads_.clear();
}
template <typename TaskType>
Status SharedBatchScheduler<TaskType>::AddQueue(
const QueueOptions& options, ProcessBatchCallback process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue) {
QueueOptions rewrite_options = options;
if ((!rewrite_options.enable_large_batch_splitting) &&
rewrite_options.max_enqueued_batches == 0) {
rewrite_options.max_enqueued_batches = 1;
}
return AddQueueAfterRewritingOptions(rewrite_options, process_batch_callback,
queue);
}
template <typename TaskType>
Status SharedBatchScheduler<TaskType>::AddQueueAfterRewritingOptions(
const QueueOptions& options, ProcessBatchCallback process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue) {
if (options.input_batch_size_limit == 0) {
return errors::InvalidArgument(
"input_batch_size_limit must be positive; was ",
options.input_batch_size_limit);
}
if (options.batch_timeout_micros < 0) {
return errors::InvalidArgument(
"batch_timeout_micros must be non-negative; was ",
options.batch_timeout_micros);
}
if (options.max_enqueued_batches == 0) {
return errors::InvalidArgument(
"max_enqueued_batches must be positive; was ",
options.max_enqueued_batches);
}
if (options.enable_large_batch_splitting &&
options.split_input_task_func == nullptr) {
return errors::InvalidArgument(
"split_input_task_func must be specified when split_input_task is "
"true: ",
options.enable_large_batch_splitting);
}
if (options.enable_large_batch_splitting &&
(options.input_batch_size_limit < options.max_execution_batch_size)) {
return errors::InvalidArgument(
"When enable_large_batch_splitting is true, input_batch_size_limit "
"must be "
"greater than or equal to max_execution_batch_size.",
options.enable_large_batch_splitting, options.input_batch_size_limit,
options.max_execution_batch_size);
}
auto schedulable_batch_callback = [this] {
mutex_lock l(mu_);
schedulable_batch_cv_.notify_one();
};
auto internal_queue =
std::unique_ptr<internal::Queue<TaskType>>(new internal::Queue<TaskType>(
options, options_.env, process_batch_callback,
schedulable_batch_callback));
auto handle = std::unique_ptr<BatchScheduler<TaskType>>(
new internal::QueueHandle<TaskType>(this->shared_from_this(),
internal_queue.get()));
{
mutex_lock l(mu_);
queues_.push_back(std::move(internal_queue));
if (next_queue_to_schedule_ == queues_.end()) {
next_queue_to_schedule_ = queues_.begin();
}
}
*queue = std::move(handle);
return absl::OkStatus();
}
template <typename TaskType>
SharedBatchScheduler<TaskType>::SharedBatchScheduler(const Options& options)
: options_(options), next_queue_to_schedule_(queues_.end()) {
PeriodicFunction::Options periodic_fn_options;
periodic_fn_options.thread_name_prefix =
strings::StrCat(options.thread_pool_name, "_");
for (int i = 0; i < options.num_batch_threads; ++i) {
std::unique_ptr<PeriodicFunction> thread(new PeriodicFunction(
[this] { this->ThreadLogic(); },
        /*interval_micros=*/0, periodic_fn_options));
batch_threads_.push_back(std::move(thread));
}
}
template <typename TaskType>
bool SharedBatchScheduler<TaskType>::BatchExists(
const BatchTaskUniquePtr& batch_to_process) {
return batch_to_process != nullptr;
}
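// Round-robins over the queues looking for a batch that is ready to be
// processed, erasing queues that are closed and fully drained along the way.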
template <typename TaskType>
void SharedBatchScheduler<TaskType>::GetNextWorkItem_Locked(
internal::Queue<TaskType>** queue_for_batch_out,
BatchTaskUniquePtr* batch_to_process_out) {
BatchTaskUniquePtr batch_to_process;
internal::Queue<TaskType>* queue_for_batch = nullptr;
const int num_queues = queues_.size();
for (int num_queues_tried = 0;
!BatchExists(batch_to_process) && num_queues_tried < num_queues;
++num_queues_tried) {
DCHECK(next_queue_to_schedule_ != queues_.end());
const bool queue_closed = (*next_queue_to_schedule_)->closed();
batch_to_process = (*next_queue_to_schedule_)->ScheduleBatch();
if (BatchExists(batch_to_process)) {
queue_for_batch = next_queue_to_schedule_->get();
}
if (queue_closed && (*next_queue_to_schedule_)->IsEmpty() &&
!BatchExists(batch_to_process)) {
DCHECK_NE(queue_for_batch, next_queue_to_schedule_->get());
next_queue_to_schedule_ = queues_.erase(next_queue_to_schedule_);
} else {
++next_queue_to_schedule_;
}
if (next_queue_to_schedule_ == queues_.end() && !queues_.empty()) {
next_queue_to_schedule_ = queues_.begin();
}
}
*queue_for_batch_out = queue_for_batch;
*batch_to_process_out = std::move(batch_to_process);
}
template <typename TaskType>
void SharedBatchScheduler<TaskType>::ThreadLogic() {
BatchTaskUniquePtr batch_to_process;
internal::Queue<TaskType>* queue_for_batch = nullptr;
{
mutex_lock l(mu_);
while (true) {
GetNextWorkItem_Locked(&queue_for_batch, &batch_to_process);
if (BatchExists(batch_to_process)) break;
      // Wait with a short timeout rather than indefinitely, so that newly
      // added, closed, or newly schedulable queues are noticed even without
      // an explicit notification.
      const int64_t kTimeoutMillis = 1;
      WaitForMilliseconds(&l, &schedulable_batch_cv_, kTimeoutMillis);
if (queues_.empty()) return;
}
}
size_t batch_size_to_schedule = batch_to_process->size();
queue_for_batch->ProcessBatch(
std::move(batch_to_process),
queue_for_batch->GetLowPriorityTasksForPadding(batch_size_to_schedule));
}
namespace internal {
template <typename TaskType>
Queue<TaskType>::Queue(
const typename SharedBatchScheduler<TaskType>::QueueOptions& options,
Env* env, ProcessBatchCallback process_batch_callback,
SchedulableBatchCallback schedulable_batch_callback)
: options_(options),
env_(env),
max_execution_batch_size_(GetMaxExecutionBatchSize(options_)),
process_batch_callback_(process_batch_callback),
schedulable_batch_callback_(schedulable_batch_callback) {
traceme_context_id_counter_ = (absl::GetCurrentTimeNanos() & 0xFFFFFFFF)
<< 32;
GetBatches().emplace_back(new Batch<TaskType>);
}
template <typename TaskType>
Queue<TaskType>::~Queue() {
mutex_lock l(mu_);
DCHECK(IsEmptyInternal());
GetBatches().back()->Close();
}
template <typename TaskType>
bool Queue<TaskType>::IsLowPriorityTask(std::unique_ptr<TaskType>* task) {
if (!options_.enable_priority_queue) {
return false;
}
if constexpr (std::is_base_of_v<BatchTask, TaskType>) {
return ((*task)->criticality() ==
tsl::criticality::Criticality::kSheddablePlus ||
(*task)->criticality() ==
tsl::criticality::Criticality::kSheddable);
}
return false;
}
template <typename TaskType>
Status Queue<TaskType>::ScheduleWithoutOrEagerSplitImpl(
std::unique_ptr<TaskType>* task) {
TF_RETURN_IF_ERROR(ValidateBatchTaskQueueCapacity((*task).get()));
std::deque<std::unique_ptr<Batch<TaskType>>>& batches = GetBatches();
const int64_t open_batch_remaining_slot =
max_execution_batch_size() - batches.back()->size();
const int64_t input_task_size = (*task)->size();
std::vector<std::unique_ptr<TaskType>> output_tasks;
if (input_task_size <= open_batch_remaining_slot ||
!options_.enable_large_batch_splitting) {
output_tasks.push_back(std::move(*task));
} else {
TF_RETURN_IF_ERROR(SplitInputBatchIntoSubtasks(task, &output_tasks));
}
for (int i = 0; i < output_tasks.size(); ++i) {
if (batches.back()->size() + output_tasks[i]->size() >
max_execution_batch_size()) {
StartNewBatch();
}
if (batches.back()->empty()) {
open_batch_start_time_micros_ = env_->NowMicros();
}
tsl::profiler::TraceMeProducer trace_me(
[&output_tasks, i] {
return profiler::TraceMeEncode("ScheduleOutputTask",
{{"size", output_tasks[i]->size()}});
},
tsl::profiler::ContextType::kSharedBatchScheduler,
batches.back()->traceme_context_id());
batches.back()->AddTask(std::move(output_tasks[i]));
}
return absl::OkStatus();
}
template <typename TaskType>
Status Queue<TaskType>::Schedule(std::unique_ptr<TaskType>* task) {
const bool large_batch_splitting = options_.enable_large_batch_splitting;
tsl::profiler::TraceMe trace_me([task, large_batch_splitting] {
return profiler::TraceMeEncode(
large_batch_splitting ? "ScheduleWithEagerSplit"
: "ScheduleWithoutSplit",
{{"batching_input_task_size", (*task)->size()}});
});
bool notify_of_schedulable_batch = false;
{
mutex_lock l(mu_);
DCHECK(!closed_);
if (IsLowPriorityTask(task)) {
TF_RETURN_IF_ERROR(ValidateLowPriorityTaskQueueCapacity(**task));
low_priority_tasks_.AddTask(std::move(*task), env_->NowMicros());
} else {
TF_RETURN_IF_ERROR(ScheduleWithoutOrEagerSplitImpl(task));
}
if (!schedulable_batch_) {
if (GetBatches().size() > 1 || IsOpenBatchSchedulable()) {
schedulable_batch_ = true;
notify_of_schedulable_batch = true;
}
}
}
if (notify_of_schedulable_batch) {
schedulable_batch_callback_();
}
return absl::OkStatus();
}
template <typename TaskType>
size_t Queue<TaskType>::NumEnqueuedTasks() const {
size_t num_enqueued_tasks = 0;
mutex_lock l(mu_);
for (const auto& batch : GetBatches()) {
num_enqueued_tasks += batch->num_tasks();
}
return num_enqueued_tasks + low_priority_tasks_.num_tasks();
}
template <typename TaskType>
size_t Queue<TaskType>::SchedulingCapacity() const {
mutex_lock l(mu_);
return SchedulingCapacityInternal();
}
template <typename TaskType>
size_t Queue<TaskType>::SchedulingCapacityInternal() const {
const int64 num_new_batches_schedulable =
static_cast<int64_t>(options_.max_enqueued_batches) -
this->num_enqueued_batches();
const int64 execution_batch_size_limit = max_execution_batch_size();
const int64 open_batch_capacity =
execution_batch_size_limit - this->tail_batch_task_size();
return (num_new_batches_schedulable * execution_batch_size_limit) +
open_batch_capacity;
}
template <typename TaskType>
Status Queue<TaskType>::ValidateBatchTaskQueueCapacity(TaskType* task) const {
if (task->size() > options_.input_batch_size_limit) {
return absl::InvalidArgumentError(absl::StrFormat(
"Task size %d is larger than maximum input batch size %d", task->size(),
options_.input_batch_size_limit));
}
if (options_.enable_large_batch_splitting) {
if (task->size() > SchedulingCapacityInternal()) {
return errors::Unavailable(
"The batch scheduling queue to which this task was submitted is "
"full; task size is ",
task->size(), " but scheduling capacity is only ",
SchedulingCapacityInternal(),
" (num_enqueued_batches=", num_enqueued_batches(),
", max_enqueued_batches=", options_.max_enqueued_batches,
", open_batch_size=", tail_batch_task_size(),
", max_execution_batch_size=", max_execution_batch_size(), ")");
}
return absl::OkStatus();
}
const std::deque<std::unique_ptr<Batch<TaskType>>>& batches = GetBatches();
if (batches.back()->size() + task->size() > options_.input_batch_size_limit) {
if (batches.size() >= options_.max_enqueued_batches) {
return errors::Unavailable(
"The batch scheduling queue to which this task was submitted is "
"full; currently ",
batches.size(), " batches enqueued and max_enqueued_batches is ",
options_.max_enqueued_batches);
}
}
return absl::OkStatus();
}
template <typename TaskType>
Status Queue<TaskType>::ValidateLowPriorityTaskQueueCapacity(
const TaskType& task) const {
if (task.size() >
options_.low_priority_queue_options.max_execution_batch_size) {
return absl::UnavailableError(absl::StrFormat(
"The low priority task queue to which this task was submitted has "
"max_execution_batch_size=%d and the task size is %d",
options_.low_priority_queue_options.max_execution_batch_size,
task.size()));
}
if (low_priority_tasks_.size() + task.size() >
options_.low_priority_queue_options.max_enqueued_batches *
options_.low_priority_queue_options.max_execution_batch_size) {
    return absl::UnavailableError(absl::StrFormat(
        "The low priority task queue to which this task was submitted does not "
        "have the capacity to handle this task; currently the low priority "
        "queue has %d tasks enqueued and the submitted task size is %d while "
        "max_enqueued_batches=%d and max_execution_batch_size=%d",
        low_priority_tasks_.size(), task.size(),
        options_.low_priority_queue_options.max_enqueued_batches,
        options_.low_priority_queue_options.max_execution_batch_size));
}
return absl::OkStatus();
}
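// Returns the next batch to process, or nullptr if none is ready. If only the
// open batch exists and it has become schedulable, it may first be trimmed to
// an allowed batch size (per the padding policy) and closed; failing that, an
// eligible low-priority batch may be formed instead.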
template <typename TaskType>
typename SharedBatchScheduler<TaskType>::BatchTaskUniquePtr
Queue<TaskType>::ScheduleBatch() {
std::unique_ptr<Batch<TaskType>> batch_to_schedule;
{
mutex_lock l(mu_);
std::deque<std::unique_ptr<Batch<TaskType>>>& batches = GetBatches();
if (batches.size() == 1 && IsOpenBatchSchedulable()) {
Batch<TaskType>& old_batch = *batches[0];
std::vector<std::unique_ptr<TaskType>> trimmed_tasks;
      MaybeBatchDown(old_batch, options_.allowed_batch_sizes,
                     options_.disable_padding, options_.batch_padding_policy,
                     options_.model_batch_stats, trimmed_tasks);
StartNewBatch();
Batch<TaskType>& new_batch = *batches[1];
for (std::unique_ptr<TaskType>& task : trimmed_tasks) {
new_batch.AddTask(std::move(task));
}
if (!new_batch.empty()) {
double position = static_cast<double>(old_batch.size()) /
(old_batch.size() + new_batch.size());
open_batch_start_time_micros_ +=
(env_->NowMicros() - open_batch_start_time_micros_) * position;
}
}
if (batches.size() >= 2) {
batch_to_schedule = std::move(batches.front());
batches.pop_front();
}
if (batch_to_schedule == nullptr) {
batch_to_schedule = ScheduleLowPriorityBatch();
}
if (batch_to_schedule == nullptr) {
schedulable_batch_ = false;
return batch_to_schedule;
}
++num_batches_being_processed_;
}
return batch_to_schedule;
}
template <typename TaskType>
std::vector<std::unique_ptr<TaskType>> Queue<TaskType>::GetLowPriorityTasks(
size_t size) {
std::vector<std::unique_ptr<TaskType>> low_priority_tasks_to_pad;
if (!options_.enable_priority_queue || size == 0)
return low_priority_tasks_to_pad;
{
mutex_lock l(mu_);
low_priority_tasks_to_pad = GetLowPriorityTaskQueue().RemoveTask(size);
}
return low_priority_tasks_to_pad;
}
template <typename TaskType>
std::vector<std::unique_ptr<TaskType>>
Queue<TaskType>::GetLowPriorityTasksForPadding(size_t batch_size) {
size_t target_batch_size;
switch (options_.mixed_priority_batching_policy) {
case MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize:
target_batch_size = max_execution_batch_size();
break;
case MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithNextAllowedBatchSize:
target_batch_size = GetNextAllowedBatchSize(
batch_size, options_.allowed_batch_sizes, options_.disable_padding);
break;
default:
target_batch_size = 0;
break;
}
if (target_batch_size <= batch_size) {
return {};
}
return GetLowPriorityTasks(target_batch_size - batch_size);
}
template <typename TaskType>
void Queue<TaskType>::ProcessBatch(
std::unique_ptr<Batch<TaskType>> batch,
std::vector<std::unique_ptr<TaskType>> padding_task) {
tsl::profiler::TraceMeConsumer trace_me(
[&] {
        return profiler::TraceMeEncode(
            "ProcessBatch", {{"batch_size_before_padding", batch->size()},
                             {"_r", 2}});
},
tsl::profiler::ContextType::kSharedBatchScheduler,
batch->traceme_context_id());
if (std::holds_alternative<ProcessBatchCallbackWithoutPaddingTasks>(
process_batch_callback_)) {
std::get<ProcessBatchCallbackWithoutPaddingTasks>(process_batch_callback_)(
std::move(batch));
} else {
std::get<ProcessBatchCallbackWithPaddingTasks>(process_batch_callback_)(
std::move(batch), std::move(padding_task));
}
{
mutex_lock l(mu_);
--num_batches_being_processed_;
if (empty_notification_ != nullptr && IsEmptyInternal()) {
empty_notification_->Notify();
}
}
}
template <typename TaskType>
bool Queue<TaskType>::IsEmpty() const {
mutex_lock l(mu_);
return IsEmptyInternal();
}
template <typename TaskType>
void Queue<TaskType>::CloseAndWaitUntilEmpty() {
Notification empty;
{
mutex_lock l(mu_);
closed_ = true;
if (IsEmptyInternal()) {
empty.Notify();
} else {
empty_notification_ = ∅
}
}
empty.WaitForNotification();
}
template <typename TaskType>
bool Queue<TaskType>::IsEmptyInternal() const {
const std::deque<std::unique_ptr<Batch<TaskType>>>& batches = GetBatches();
return num_batches_being_processed_ == 0 && batches.size() == 1 &&
batches.back()->empty() && low_priority_tasks_.empty();
}
template <typename TaskType>
void Queue<TaskType>::StartNewBatch() {
std::deque<std::unique_ptr<Batch<TaskType>>>& batches = GetBatches();
batches.back()->Close();
batches.emplace_back(new Batch<TaskType>(++traceme_context_id_counter_));
}
template <typename TaskType>
Status Queue<TaskType>::SplitInputBatchIntoSubtasks(
std::unique_ptr<TaskType>* input_task,
std::vector<std::unique_ptr<TaskType>>* output_tasks) {
const int open_batch_remaining_slot =
max_execution_batch_size() - this->tail_batch_task_size();
return options_.split_input_task_func(
std::move(input_task), open_batch_remaining_slot,
max_execution_batch_size(), std::move(output_tasks));
}
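// A non-empty open batch is schedulable once the queue has been closed, the
// batch has reached max_execution_batch_size, or batch_timeout_micros has
// elapsed since the batch received its first task.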
template <typename TaskType>
bool Queue<TaskType>::IsOpenBatchSchedulable() const {
Batch<TaskType>* open_batch = GetBatches().back().get();
if (open_batch->empty()) {
return false;
}
return closed_ || open_batch->size() >= max_execution_batch_size() ||
env_->NowMicros() >=
open_batch_start_time_micros_ + options_.batch_timeout_micros;
}
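// Forms a batch out of queued low-priority tasks, but only when priority
// queueing is enabled, no high-priority work is pending, and either the
// oldest low-priority task has exceeded its timeout or enough tasks have
// accumulated to fill a low-priority batch.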
template <typename TaskType>
std::unique_ptr<Batch<TaskType>> Queue<TaskType>::ScheduleLowPriorityBatch() {
std::unique_ptr<Batch<TaskType>> batch_to_schedule;
if (!options_.enable_priority_queue || low_priority_tasks_.empty()) {
return batch_to_schedule;
}
if (env_->NowMicros() <
*low_priority_tasks_.EarliestTaskStartTime() +
options_.low_priority_queue_options.batch_timeout_micros &&
low_priority_tasks_.size() <
options_.low_priority_queue_options.max_execution_batch_size) {
return batch_to_schedule;
}
if (!GetBatches().empty() && !GetBatches().front()->empty()) {
return batch_to_schedule;
}
batch_to_schedule = std::make_unique<Batch<TaskType>>();
for (std::unique_ptr<TaskType>& task : low_priority_tasks_.RemoveTask(
options_.low_priority_queue_options.max_execution_batch_size)) {
batch_to_schedule->AddTask(std::move(task));
}
batch_to_schedule->Close();
return batch_to_schedule;
}
template <typename TaskType>
size_t Queue<TaskType>::tail_batch_task_size() const {
return GetBatches().back()->size();
}
template <typename TaskType>
int64 Queue<TaskType>::num_enqueued_batches() const {
return GetBatches().size();
}
template <typename TaskType>
std::deque<std::unique_ptr<Batch<TaskType>>>& Queue<TaskType>::GetBatches() {
return high_priority_batches_;
}
template <typename TaskType>
const std::deque<std::unique_ptr<Batch<TaskType>>>&
Queue<TaskType>::GetBatches() const {
return high_priority_batches_;
}
template <typename TaskType>
TaskQueue<TaskType>& Queue<TaskType>::GetLowPriorityTaskQueue() {
return low_priority_tasks_;
}
template <typename TaskType>
QueueHandle<TaskType>::QueueHandle(
std::shared_ptr<SharedBatchScheduler<TaskType>> scheduler,
Queue<TaskType>* queue)
: scheduler_(scheduler), queue_(queue) {}
template <typename TaskType>
QueueHandle<TaskType>::~QueueHandle() {
queue_->CloseAndWaitUntilEmpty();
}
template <typename TaskType>
Status QueueHandle<TaskType>::Schedule(std::unique_ptr<TaskType>* task) {
return queue_->Schedule(task);
}
template <typename TaskType>
size_t QueueHandle<TaskType>::NumEnqueuedTasks() const {
return queue_->NumEnqueuedTasks();
}
template <typename TaskType>
size_t QueueHandle<TaskType>::SchedulingCapacity() const {
return queue_->SchedulingCapacity();
}
}
}
}
#endif | #include "tensorflow/core/kernels/batching_util/shared_batch_scheduler.h"
#include <cstddef>
#include <memory>
#include <string>
#include <thread>
#include <tuple>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/base/call_once.h"
#include "absl/container/fixed_array.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h"
#include "tensorflow/core/kernels/batching_util/fake_clock_env.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/criticality.h"
namespace tensorflow {
namespace serving {
namespace {
using ::testing::HasSubstr;
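// A minimal BatchTask that records only a size and a criticality; the tests
// schedule FakeTasks and then inspect the batches the scheduler forms.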
class FakeTask : public BatchTask {
public:
explicit FakeTask(size_t size, tsl::criticality::Criticality criticality =
tsl::criticality::Criticality::kCritical)
: size_(size), criticality_(criticality) {}
~FakeTask() override = default;
size_t size() const override { return size_; }
tsl::criticality::Criticality criticality() const override {
return criticality_;
}
private:
const size_t size_;
const tsl::criticality::Criticality criticality_;
FakeTask(const FakeTask&) = delete;
void operator=(const FakeTask&) = delete;
};
class FakeTaskWithoutCriticality {
public:
explicit FakeTaskWithoutCriticality(size_t size) : size_(size) {}
~FakeTaskWithoutCriticality() = default;
size_t size() const { return size_; }
private:
const size_t size_;
FakeTaskWithoutCriticality(const FakeTaskWithoutCriticality&) = delete;
void operator=(const FakeTaskWithoutCriticality&) = delete;
};
using Queue = BatchScheduler<FakeTask>;
using Scheduler = SharedBatchScheduler<FakeTask>;
using QueueOptions = Scheduler::QueueOptions;
using SplitFunc =
std::function<Status(std::unique_ptr<FakeTask>* input_task,
int first_output_task_size, int input_batch_size_limit,
std::vector<std::unique_ptr<FakeTask>>* output_tasks)>;
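// Creates a FakeTask of size `task_size` and submits it to `scheduler`; on
// success the scheduler takes ownership of the task.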
Status ScheduleTask(size_t task_size, BatchScheduler<FakeTask>* scheduler,
tsl::criticality::Criticality criticality =
tsl::criticality::Criticality::kCritical) {
std::unique_ptr<FakeTask> task(new FakeTask(task_size, criticality));
Status status = scheduler->Schedule(&task);
CHECK_EQ(status.ok(), task == nullptr);
return status;
}
Status ScheduleTaskWithoutCriticality(
size_t task_size, BatchScheduler<FakeTaskWithoutCriticality>* scheduler) {
std::unique_ptr<FakeTaskWithoutCriticality> task(
new FakeTaskWithoutCriticality(task_size));
Status status = scheduler->Schedule(&task);
CHECK_EQ(status.ok(), task == nullptr);
return status;
}
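// Starts a thread that, once `start` is notified, repeatedly advances the
// fake clock in 10-microsecond steps until `stop` is notified. Lets tests
// exercise batch timeouts deterministically.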
std::unique_ptr<Thread> CreateFakeClockAdvancerThread(
test_util::FakeClockEnv* env, Notification* start, Notification* stop) {
return std::unique_ptr<Thread>(Env::Default()->StartThread(
{}, "FakeClockAdvancerThread", [env, start, stop] {
start->WaitForNotification();
while (!stop->HasBeenNotified()) {
env->AdvanceByMicroseconds(10);
Env::Default()->SleepForMicroseconds(10);
}
}));
}
std::shared_ptr<Scheduler> CreateSharedBatchScheduler(
int num_batch_threads, Env* env = Env::Default()) {
Scheduler::Options options;
options.num_batch_threads = num_batch_threads;
options.env = env;
std::shared_ptr<Scheduler> shared_batch_scheduler;
TF_CHECK_OK(Scheduler::Create(options, &shared_batch_scheduler));
return shared_batch_scheduler;
}
std::unique_ptr<Queue> CreateQueue(
std::shared_ptr<Scheduler> scheduler, Scheduler::QueueOptions queue_options,
internal::Queue<FakeTask>::ProcessBatchCallback process_batch_callback) {
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_CHECK_OK(
scheduler->AddQueue(queue_options, process_batch_callback, &queue));
return queue;
}
QueueOptions CreateQueueOptions(size_t max_execution_batch_size,
size_t input_batch_size_limit,
size_t batch_timeout_micros,
size_t max_enqueued_batches,
bool enable_large_batch_splitting,
SplitFunc split_func,
bool enable_priority_queue = false) {
QueueOptions queue_options;
queue_options.max_enqueued_batches = max_enqueued_batches;
queue_options.max_execution_batch_size = max_execution_batch_size;
queue_options.input_batch_size_limit = input_batch_size_limit;
queue_options.batch_timeout_micros = batch_timeout_micros;
queue_options.enable_large_batch_splitting = enable_large_batch_splitting;
queue_options.enable_priority_queue = enable_priority_queue;
if (enable_large_batch_splitting) {
queue_options.split_input_task_func = split_func;
}
return queue_options;
}
class SharedBatchSchedulerTestBase {
public:
SharedBatchSchedulerTestBase() = default;
virtual ~SharedBatchSchedulerTestBase() = default;
protected:
QueueOptions CreateQueueOptions(size_t max_execution_batch_size,
size_t input_batch_size_limit,
size_t batch_timeout_micros,
size_t max_enqueued_batches,
bool enable_priority_queue = false) {
return tensorflow::serving::CreateQueueOptions(
max_execution_batch_size, input_batch_size_limit, batch_timeout_micros,
max_enqueued_batches, enable_input_batch_split(), get_split_func(),
enable_priority_queue);
}
virtual bool enable_input_batch_split() const = 0;
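  // When input-batch splitting is enabled, returns a split function that uses
  // InputSplitMetadata to carve an oversized task into subtasks sized to fill
  // the open batch and subsequent batches; otherwise returns nullptr.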
SplitFunc get_split_func() const {
if (enable_input_batch_split()) {
return
[](std::unique_ptr<FakeTask>* input_task,
int open_batch_remaining_slot, int max_batch_size,
std::vector<std::unique_ptr<FakeTask>>* output_tasks) -> Status {
std::unique_ptr<FakeTask> owned_input_task = std::move(*input_task);
const int input_task_size = owned_input_task->size();
const internal::InputSplitMetadata input_split_metadata(
input_task_size, open_batch_remaining_slot, max_batch_size);
const absl::FixedArray<int> task_sizes =
input_split_metadata.task_sizes();
const int num_batches = task_sizes.size();
output_tasks->resize(num_batches);
for (int i = 0; i < num_batches; i++) {
(*output_tasks)[i] = std::make_unique<FakeTask>(task_sizes[i]);
}
return absl::OkStatus();
};
}
return nullptr;
}
};
class SharedBatchSchedulerTest : public ::testing::TestWithParam<bool>,
public SharedBatchSchedulerTestBase {
protected:
bool enable_input_batch_split() const override { return GetParam(); }
};
TEST_P(SharedBatchSchedulerTest, Basic) {
for (int num_batch_threads : {1, 2, 3}) {
for (const bool delete_scheduler_early : {false, true}) {
for (const bool delete_queue_1_early : {false, true}) {
bool queue_0_callback_called = false;
auto queue_0_callback =
[&queue_0_callback_called](std::unique_ptr<Batch<FakeTask>> batch) {
queue_0_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
};
bool queue_1_callback_called = false;
auto queue_1_callback =
[&queue_1_callback_called](std::unique_ptr<Batch<FakeTask>> batch) {
queue_1_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(2, batch->task(0).size());
EXPECT_EQ(4, batch->task(1).size());
};
{
auto scheduler = CreateSharedBatchScheduler(num_batch_threads);
const size_t input_batch_size_limit = 10;
const size_t batch_timeout_micros = 1 * 1000 * 1000;
const size_t max_enqueued_batches = 2;
const auto queue_options =
CreateQueueOptions(input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches);
auto queue_0 =
CreateQueue(scheduler, queue_options, queue_0_callback);
auto queue_1 =
CreateQueue(scheduler, queue_options, queue_1_callback);
if (delete_scheduler_early) {
scheduler = nullptr;
}
TF_ASSERT_OK(ScheduleTask(1, queue_0.get()));
TF_ASSERT_OK(ScheduleTask(2, queue_1.get()));
TF_ASSERT_OK(ScheduleTask(3, queue_0.get()));
TF_ASSERT_OK(ScheduleTask(4, queue_1.get()));
if (delete_queue_1_early) {
queue_1 = nullptr;
}
TF_ASSERT_OK(ScheduleTask(5, queue_0.get()));
}
EXPECT_TRUE(queue_0_callback_called);
EXPECT_TRUE(queue_1_callback_called);
}
}
}
}
TEST_P(SharedBatchSchedulerTest,
CallbackWithTaskVectorOkWithPriorityQueueEnabled) {
bool queue_0_callback_called = false;
auto queue_0_callback = [&queue_0_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_0_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
EXPECT_EQ(0, tasks.size());
};
bool queue_1_callback_called = false;
auto queue_1_callback = [&queue_1_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_1_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(2, batch->task(0).size());
EXPECT_EQ(4, batch->task(1).size());
EXPECT_EQ(0, tasks.size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
const QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
std::unique_ptr<Queue> queue_0 =
CreateQueue(scheduler, queue_options, queue_0_callback);
std::unique_ptr<Queue> queue_1 =
CreateQueue(scheduler, queue_options, queue_1_callback);
TF_ASSERT_OK(ScheduleTask(1, queue_0.get()));
TF_ASSERT_OK(ScheduleTask(2, queue_1.get()));
TF_ASSERT_OK(ScheduleTask(3, queue_0.get()));
TF_ASSERT_OK(ScheduleTask(4, queue_1.get()));
TF_ASSERT_OK(ScheduleTask(5, queue_0.get()));
}
EXPECT_TRUE(queue_0_callback_called);
EXPECT_TRUE(queue_1_callback_called);
}
TEST_P(SharedBatchSchedulerTest,
CallbackWithTaskVectorOkWithPriorityQueueDisabled) {
bool queue_0_callback_called = false;
auto queue_0_callback = [&queue_0_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_0_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
EXPECT_EQ(0, tasks.size());
};
bool queue_1_callback_called = false;
auto queue_1_callback = [&queue_1_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_1_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(2, batch->task(0).size());
EXPECT_EQ(4, batch->task(1).size());
EXPECT_EQ(0, tasks.size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
const QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/false);
std::unique_ptr<Queue> queue_0 =
CreateQueue(scheduler, queue_options, queue_0_callback);
std::unique_ptr<Queue> queue_1 =
CreateQueue(scheduler, queue_options, queue_1_callback);
TF_ASSERT_OK(ScheduleTask(1, queue_0.get()));
TF_ASSERT_OK(ScheduleTask(2, queue_1.get()));
TF_ASSERT_OK(ScheduleTask(3, queue_0.get()));
TF_ASSERT_OK(ScheduleTask(4, queue_1.get()));
TF_ASSERT_OK(ScheduleTask(5, queue_0.get()));
}
EXPECT_TRUE(queue_0_callback_called);
EXPECT_TRUE(queue_1_callback_called);
}
TEST_P(
SharedBatchSchedulerTest,
CallbackWithTaskVectorOkWithPriorityQueueEnabledWithCriticalitylessTask) {
bool queue_0_callback_called = false;
auto queue_0_callback =
[&queue_0_callback_called](
std::unique_ptr<Batch<FakeTaskWithoutCriticality>> batch,
std::vector<std::unique_ptr<FakeTaskWithoutCriticality>> tasks) {
queue_0_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
EXPECT_EQ(0, tasks.size());
};
bool queue_1_callback_called = false;
auto queue_1_callback =
[&queue_1_callback_called](
std::unique_ptr<Batch<FakeTaskWithoutCriticality>> batch,
std::vector<std::unique_ptr<FakeTaskWithoutCriticality>> tasks) {
queue_1_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(2, batch->task(0).size());
EXPECT_EQ(4, batch->task(1).size());
EXPECT_EQ(0, tasks.size());
};
{
SharedBatchScheduler<FakeTaskWithoutCriticality>::Options options;
options.num_batch_threads = 3;
options.env = Env::Default();
std::shared_ptr<SharedBatchScheduler<FakeTaskWithoutCriticality>>
shared_batch_scheduler;
TF_CHECK_OK(SharedBatchScheduler<FakeTaskWithoutCriticality>::Create(
options, &shared_batch_scheduler));
SharedBatchScheduler<FakeTaskWithoutCriticality>::QueueOptions
queue_options;
queue_options.input_batch_size_limit = 10;
queue_options.batch_timeout_micros = 1000 * 1000;
queue_options.max_enqueued_batches = 2;
queue_options.enable_large_batch_splitting = enable_input_batch_split();
queue_options.split_input_task_func =
[](std::unique_ptr<FakeTaskWithoutCriticality>* input_task,
int open_batch_remaining_slot, int max_batch_size,
std::vector<std::unique_ptr<FakeTaskWithoutCriticality>>*
output_tasks) -> Status {
std::unique_ptr<FakeTaskWithoutCriticality> owned_input_task =
std::move(*input_task);
const int input_task_size = owned_input_task->size();
const internal::InputSplitMetadata input_split_metadata(
input_task_size, open_batch_remaining_slot, max_batch_size);
const absl::FixedArray<int> task_sizes =
input_split_metadata.task_sizes();
const int num_batches = task_sizes.size();
output_tasks->resize(num_batches);
for (int i = 0; i < num_batches; i++) {
(*output_tasks)[i] =
std::make_unique<FakeTaskWithoutCriticality>(task_sizes[i]);
}
return absl::OkStatus();
};
queue_options.max_execution_batch_size = 10;
queue_options.enable_priority_queue = true;
std::unique_ptr<BatchScheduler<FakeTaskWithoutCriticality>> queue_0;
TF_CHECK_OK(shared_batch_scheduler->AddQueue(queue_options,
queue_0_callback, &queue_0));
std::unique_ptr<BatchScheduler<FakeTaskWithoutCriticality>> queue_1;
TF_CHECK_OK(shared_batch_scheduler->AddQueue(queue_options,
queue_1_callback, &queue_1));
TF_ASSERT_OK(ScheduleTaskWithoutCriticality(1, queue_0.get()));
TF_ASSERT_OK(ScheduleTaskWithoutCriticality(2, queue_1.get()));
TF_ASSERT_OK(ScheduleTaskWithoutCriticality(3, queue_0.get()));
TF_ASSERT_OK(ScheduleTaskWithoutCriticality(4, queue_1.get()));
TF_ASSERT_OK(ScheduleTaskWithoutCriticality(5, queue_0.get()));
}
EXPECT_TRUE(queue_0_callback_called);
EXPECT_TRUE(queue_1_callback_called);
}
TEST_P(SharedBatchSchedulerTest, ObeyBatchSizeConstraint) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
mutex mu;
std::vector<std::vector<size_t>> callback_data;
Notification all_batches_processed;
auto callback = [&mu, &callback_data, &all_batches_processed](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
std::vector<size_t> batch_data;
batch_data.reserve(batch->num_tasks());
for (int i = 0; i < batch->num_tasks(); ++i) {
batch_data.push_back(batch->mutable_task(i)->size());
}
{
mutex_lock l(mu);
callback_data.push_back(batch_data);
if (callback_data.size() == 2) {
all_batches_processed.Notify();
}
}
};
{
auto scheduler = CreateSharedBatchScheduler(2, &env);
const size_t input_batch_size_limit = 10;
const size_t batch_timeout_micros = 10 * 1000;
const size_t max_enqueued_batches = 2;
auto queue = CreateQueue(
scheduler,
CreateQueueOptions(input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches),
callback);
if (enable_input_batch_split()) {
TF_ASSERT_OK(ScheduleTask(3, queue.get()));
TF_ASSERT_OK(ScheduleTask(5, queue.get()));
      TF_ASSERT_OK(ScheduleTask(3, queue.get()));
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
TF_ASSERT_OK(ScheduleTask(6, queue.get()));
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
} else {
TF_ASSERT_OK(ScheduleTask(3, queue.get()));
TF_ASSERT_OK(ScheduleTask(5, queue.get()));
      TF_ASSERT_OK(ScheduleTask(3, queue.get()));
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
TF_ASSERT_OK(ScheduleTask(6, queue.get()));
}
env.AdvanceByMicroseconds(20 * 1000);
all_batches_processed.WaitForNotification();
if (enable_input_batch_split()) {
EXPECT_THAT(
callback_data,
::testing::UnorderedElementsAreArray(std::vector<std::vector<size_t>>{
std::vector<size_t>{3, 5, 2}, std::vector<size_t>{1, 1, 6, 1}}));
} else {
EXPECT_THAT(callback_data,
::testing::UnorderedElementsAreArray(
std::vector<std::vector<size_t>>{{3, 5}, {3, 1, 6}}));
}
start_teardown.Notify();
}
stop_teardown.Notify();
}
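// A partially filled batch must not be processed until its timeout expires on
// the fake clock (or the batch fills up).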
TEST_P(SharedBatchSchedulerTest, ObeysTimeout) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
Notification first_batch_processed, second_batch_processed,
third_batch_processed;
bool notify_first_batch = false, notify_second_batch = false,
notify_third_batch = false;
auto callback = [&](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
if (notify_first_batch && (!first_batch_processed.HasBeenNotified())) {
first_batch_processed.Notify();
return;
}
if (notify_second_batch && (!second_batch_processed.HasBeenNotified())) {
second_batch_processed.Notify();
return;
}
if (notify_third_batch && (!third_batch_processed.HasBeenNotified())) {
third_batch_processed.Notify();
return;
}
EXPECT_TRUE(false) << "Unexpected condition";
};
auto scheduler = CreateSharedBatchScheduler(1, &env);
const size_t input_batch_size_limit = 4;
const size_t batch_timeout_micros = 10;
const size_t max_enqueued_batches = 2;
QueueOptions options =
CreateQueueOptions(input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches);
auto queue = CreateQueue(scheduler, options, callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
env.AdvanceByMicroseconds(9);
    Env::Default()->SleepForMicroseconds(10 * 1000);
EXPECT_FALSE(first_batch_processed.HasBeenNotified());
notify_first_batch = true;
env.AdvanceByMicroseconds(1);
first_batch_processed.WaitForNotification();
TF_ASSERT_OK(ScheduleTask(2, queue.get()));
    Env::Default()->SleepForMicroseconds(10 * 1000);
EXPECT_FALSE(second_batch_processed.HasBeenNotified());
notify_second_batch = true;
TF_ASSERT_OK(ScheduleTask(3, queue.get()));
second_batch_processed.WaitForNotification();
env.AdvanceByMicroseconds(9);
    Env::Default()->SleepForMicroseconds(10 * 1000);
EXPECT_FALSE(third_batch_processed.HasBeenNotified());
notify_third_batch = true;
env.AdvanceByMicroseconds(1);
third_batch_processed.WaitForNotification();
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST_P(SharedBatchSchedulerTest, ObeysTimeoutWithRealClock) {
Notification first_batch_processed, second_batch_processed;
auto callback = [&first_batch_processed, &second_batch_processed](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
if (batch->size() == 1) {
first_batch_processed.Notify();
} else if (batch->size() == 2) {
second_batch_processed.Notify();
} else {
EXPECT_TRUE(false) << "Unexpected batch size";
}
};
auto scheduler = CreateSharedBatchScheduler(2);
const size_t input_batch_size_limit = 10;
const size_t batch_timeout_micros = 100 * 1000;
const size_t max_enqueued_batches = 2;
auto queue = CreateQueue(
scheduler,
CreateQueueOptions(input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches),
callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
first_batch_processed.WaitForNotification();
TF_ASSERT_OK(ScheduleTask(2, queue.get()));
second_batch_processed.WaitForNotification();
}
TEST_P(SharedBatchSchedulerTest,
WithZeroTimeoutBatchesScheduledAsSoonAsThreadIsAvailable) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
Notification first_batch_processed, second_batch_processed;
auto callback = [&first_batch_processed, &second_batch_processed](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
if (batch->size() == 1) {
first_batch_processed.Notify();
} else if (batch->size() == 2) {
second_batch_processed.Notify();
} else {
EXPECT_TRUE(false) << "Unexpected batch size";
}
};
auto scheduler = CreateSharedBatchScheduler(2, &env);
const size_t batch_size_limit = 100;
const size_t batch_timeout_micros = 0;
const size_t max_enqueued_batches = 2;
auto queue = CreateQueue(
scheduler,
CreateQueueOptions(batch_size_limit, batch_size_limit,
batch_timeout_micros, max_enqueued_batches),
callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
first_batch_processed.WaitForNotification();
TF_ASSERT_OK(ScheduleTask(2, queue.get()));
second_batch_processed.WaitForNotification();
start_teardown.Notify();
}
stop_teardown.Notify();
}
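// With a single batch thread, a backlog on queue 0 must not starve queue 1:
// once queue 0's first batch finishes, queue 1's pending batch runs before
// queue 0's remaining batches.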
TEST_P(SharedBatchSchedulerTest, Fairness) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
Notification queue_0_first_batch_scheduled, queue_0_first_batch_proceed,
queue_0_second_batch_scheduled;
auto queue_0_callback = [&queue_0_first_batch_scheduled,
&queue_0_first_batch_proceed,
&queue_0_second_batch_scheduled](
std::unique_ptr<Batch<FakeTask>> batch) {
if (!queue_0_first_batch_scheduled.HasBeenNotified()) {
queue_0_first_batch_scheduled.Notify();
queue_0_first_batch_proceed.WaitForNotification();
} else if (!queue_0_second_batch_scheduled.HasBeenNotified()) {
queue_0_second_batch_scheduled.Notify();
}
};
Notification queue_1_first_batch_scheduled, queue_1_first_batch_proceed;
auto queue_1_callback =
[&queue_1_first_batch_scheduled,
&queue_1_first_batch_proceed](std::unique_ptr<Batch<FakeTask>> batch) {
queue_1_first_batch_scheduled.Notify();
queue_1_first_batch_proceed.WaitForNotification();
};
auto scheduler = CreateSharedBatchScheduler(1, &env);
size_t input_batch_size_limit = 10;
    QueueOptions queue_options = CreateQueueOptions(
        input_batch_size_limit, input_batch_size_limit,
        /*batch_timeout_micros=*/1, /*max_enqueued_batches=*/100);
std::vector<std::unique_ptr<BatchScheduler<FakeTask>>> queues(2);
TF_ASSERT_OK(
scheduler->AddQueue(queue_options, queue_0_callback, &queues[0]));
TF_ASSERT_OK(
scheduler->AddQueue(queue_options, queue_1_callback, &queues[1]));
TF_ASSERT_OK(ScheduleTask(10, queues[0].get()));
env.AdvanceByMicroseconds(1);
queue_0_first_batch_scheduled.WaitForNotification();
TF_ASSERT_OK(ScheduleTask(10, queues[0].get()));
TF_ASSERT_OK(ScheduleTask(10, queues[0].get()));
TF_ASSERT_OK(ScheduleTask(1, queues[1].get()));
env.AdvanceByMicroseconds(1);
queue_0_first_batch_proceed.Notify();
queue_1_first_batch_scheduled.WaitForNotification();
    Env::Default()->SleepForMicroseconds(10 * 1000);
EXPECT_FALSE(queue_0_second_batch_scheduled.HasBeenNotified());
queue_1_first_batch_proceed.Notify();
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST_P(SharedBatchSchedulerTest, ConstMethods) {
for (const int max_enqueued_batches : {1, 2, 5}) {
Notification processing, proceed;
auto callback = [&processing,
&proceed](std::unique_ptr<Batch<FakeTask>> batch) {
if (!processing.HasBeenNotified()) {
processing.Notify();
}
proceed.WaitForNotification();
};
    auto scheduler = CreateSharedBatchScheduler(/*num_batch_threads=*/1);
const size_t input_batch_size_limit = 2;
const size_t batch_timeout_micros = 0;
auto queue = CreateQueue(
scheduler,
CreateQueueOptions(input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches),
callback);
EXPECT_EQ(2, queue->max_task_size());
EXPECT_EQ(0, queue->NumEnqueuedTasks());
EXPECT_EQ(max_enqueued_batches * 2, queue->SchedulingCapacity());
TF_ASSERT_OK(ScheduleTask(2, queue.get()));
processing.WaitForNotification();
EXPECT_EQ(0, queue->NumEnqueuedTasks());
for (int i = 0; i < max_enqueued_batches; ++i) {
EXPECT_EQ(i * 2, queue->NumEnqueuedTasks());
EXPECT_EQ((max_enqueued_batches - i) * 2, queue->SchedulingCapacity());
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
EXPECT_EQ((i * 2) + 1, queue->NumEnqueuedTasks());
EXPECT_EQ((max_enqueued_batches - i) * 2 - 1,
queue->SchedulingCapacity());
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
}
EXPECT_EQ(max_enqueued_batches * 2, queue->NumEnqueuedTasks());
EXPECT_EQ(0, queue->SchedulingCapacity());
EXPECT_THAT(
ScheduleTask(1, queue.get()),
testing::StatusIs(error::UNAVAILABLE,
HasSubstr("The batch scheduling queue to which this "
"task was submitted is full")));
EXPECT_EQ(max_enqueued_batches * 2, queue->NumEnqueuedTasks());
EXPECT_EQ(0, queue->SchedulingCapacity());
proceed.Notify();
}
}
TEST_P(SharedBatchSchedulerTest, OneFullQueueDoesntBlockOtherQueues) {
Notification queue_0_processing, queue_0_proceed;
auto queue_0_callback = [&queue_0_processing, &queue_0_proceed](
std::unique_ptr<Batch<FakeTask>> batch) {
if (!queue_0_processing.HasBeenNotified()) {
queue_0_processing.Notify();
queue_0_proceed.WaitForNotification();
}
};
Notification queue_1_first_batch_processed, queue_1_second_batch_processed,
queue_1_third_batch_processed;
auto queue_1_callback =
[&queue_1_first_batch_processed, &queue_1_second_batch_processed,
&queue_1_third_batch_processed](std::unique_ptr<Batch<FakeTask>> batch) {
if (batch->size() == 1) {
queue_1_first_batch_processed.Notify();
} else if (batch->size() == 2) {
queue_1_second_batch_processed.Notify();
} else if (batch->size() == 3) {
queue_1_third_batch_processed.Notify();
} else {
EXPECT_TRUE(false) << "Unexpected batch size";
}
};
  auto scheduler = CreateSharedBatchScheduler(/*num_batch_threads=*/2);
const size_t input_batch_size_limit = 10;
const size_t batch_timeout_micros = 0;
const size_t max_enqueued_batches = 2;
QueueOptions queue_options =
CreateQueueOptions(input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches);
std::unique_ptr<BatchScheduler<FakeTask>> queue_0;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_0_callback, &queue_0));
std::unique_ptr<BatchScheduler<FakeTask>> queue_1;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_1_callback, &queue_1));
TF_ASSERT_OK(ScheduleTask(1, queue_0.get()));
queue_0_processing.WaitForNotification();
Status queue_0_status;
do {
queue_0_status = ScheduleTask(1, queue_0.get());
} while (queue_0_status.ok());
EXPECT_EQ(error::UNAVAILABLE, queue_0_status.code());
TF_ASSERT_OK(ScheduleTask(1, queue_1.get()));
queue_1_first_batch_processed.WaitForNotification();
TF_ASSERT_OK(ScheduleTask(2, queue_1.get()));
queue_1_second_batch_processed.WaitForNotification();
TF_ASSERT_OK(ScheduleTask(3, queue_1.get()));
queue_1_third_batch_processed.WaitForNotification();
queue_0_proceed.Notify();
}
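// Destroying a queue must block until every batch already enqueued on it has
// been processed.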
TEST_P(SharedBatchSchedulerTest, QueueDestructorBlocksUntilAllTasksProcessed) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
int current_batch = 0;
Notification first_callback_started;
const int kMaxEnqueuedBatches = 3;
std::vector<Notification> callback_proceed(kMaxEnqueuedBatches);
auto callback =
        [&current_batch, &first_callback_started,
&callback_proceed](std::unique_ptr<Batch<FakeTask>> batch) {
if (current_batch == 0) {
first_callback_started.Notify();
}
callback_proceed[current_batch].WaitForNotification();
++current_batch;
};
auto scheduler = CreateSharedBatchScheduler(1, &env);
const size_t batch_size_limit = 10;
const size_t batch_timeout_micros = 0;
const size_t max_enqueued_batches = 2;
QueueOptions queue_options =
CreateQueueOptions(batch_size_limit, batch_size_limit,
batch_timeout_micros, max_enqueued_batches);
auto queue = CreateQueue(scheduler, queue_options, callback);
int num_enqueued_batches = 0;
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
++num_enqueued_batches;
env.AdvanceByMicroseconds(1);
first_callback_started.WaitForNotification();
for (int i = 0; i < 2; ++i) {
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
++num_enqueued_batches;
}
EXPECT_EQ(kMaxEnqueuedBatches, num_enqueued_batches);
EXPECT_EQ(error::UNAVAILABLE, ScheduleTask(10, queue.get()).code());
Notification destroy_queue_thread_started, queue_destroyed;
std::unique_ptr<Thread> destroy_queue_thread(Env::Default()->StartThread(
{}, "DestroyQueueThread",
[&queue, &destroy_queue_thread_started, &queue_destroyed] {
destroy_queue_thread_started.Notify();
queue = nullptr;
queue_destroyed.Notify();
}));
destroy_queue_thread_started.WaitForNotification();
for (int i = 0; i < num_enqueued_batches; ++i) {
      Env::Default()->SleepForMicroseconds(10 * 1000);
EXPECT_FALSE(queue_destroyed.HasBeenNotified());
callback_proceed[i].Notify();
}
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST_P(SharedBatchSchedulerTest, ZeroQueueRewrittenToOneQueue) {
auto callback = [](std::unique_ptr<Batch<FakeTask>> batch) {
};
auto scheduler = CreateSharedBatchScheduler(2);
const size_t input_batch_size_limit = 10;
const size_t batch_timeout_micros = 100 * 1000;
const size_t max_enqueued_batches = 0;
std::unique_ptr<Queue> queue;
if (enable_input_batch_split()) {
EXPECT_THAT(
scheduler->AddQueue(tensorflow::serving::CreateQueueOptions(
input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches,
enable_input_batch_split(), get_split_func()),
callback, &queue),
testing::StatusIs(error::INVALID_ARGUMENT,
"max_enqueued_batches must be positive; was 0"));
} else {
TF_ASSERT_OK(
scheduler->AddQueue(tensorflow::serving::CreateQueueOptions(
input_batch_size_limit, input_batch_size_limit,
batch_timeout_micros, max_enqueued_batches,
enable_input_batch_split(), get_split_func()),
callback, &queue));
EXPECT_EQ(queue->SchedulingCapacity(), input_batch_size_limit);
}
}
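// With the BATCH_DOWN padding policy and allowed sizes {1, 2, 4, 8}, a batch
// of three size-1 tasks is trimmed to size 2, and the leftover task is
// processed later as its own batch once an additional timeout elapses.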
TEST_P(SharedBatchSchedulerTest, BatchPaddingPolicyBatchDown) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
Notification first_batch_processed;
Notification second_batch_processed;
auto callback = [&](std::unique_ptr<Batch<FakeTask>> batch) {
if (!first_batch_processed.HasBeenNotified()) {
EXPECT_EQ(batch->size(), 2);
first_batch_processed.Notify();
return;
}
if (!second_batch_processed.HasBeenNotified()) {
EXPECT_EQ(batch->size(), 1);
second_batch_processed.Notify();
return;
}
ADD_FAILURE() << "Batch callback must not be invoked more than expected";
};
auto scheduler = CreateSharedBatchScheduler(1, &env);
QueueOptions options =
        CreateQueueOptions(/*max_execution_batch_size=*/10,
                           /*input_batch_size_limit=*/10,
                           /*batch_timeout_micros=*/10,
                           /*max_enqueued_batches=*/10);
options.allowed_batch_sizes = {1, 2, 4, 8};
options.batch_padding_policy = kBatchDownPolicy;
auto queue = CreateQueue(scheduler, options, callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
env.AdvanceByMicroseconds(options.batch_timeout_micros);
first_batch_processed.WaitForNotification();
auto new_batch_timeout_micros = options.batch_timeout_micros * 2 / 3;
env.AdvanceByMicroseconds(new_batch_timeout_micros - 1);
EXPECT_FALSE(second_batch_processed.WaitForNotificationWithTimeout(
absl::Milliseconds(10)));
env.AdvanceByMicroseconds(1);
second_batch_processed.WaitForNotification();
start_teardown.Notify();
}
stop_teardown.Notify();
}
INSTANTIATE_TEST_SUITE_P(Parameter, SharedBatchSchedulerTest,
::testing::Bool());
class SharedBatchSchedulerPriorityTest
: public ::testing::TestWithParam<
std::tuple<bool, MixedPriorityBatchingPolicy>>,
public SharedBatchSchedulerTestBase {
protected:
bool enable_input_batch_split() const override {
return std::get<0>(GetParam());
}
MixedPriorityBatchingPolicy mixed_priority_batching_policy() const {
return std::get<1>(GetParam());
}
};
TEST_P(SharedBatchSchedulerPriorityTest,
InvalidLowPriorityTaskWithPriorityQueueEnabled) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_callback_called = true;
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/100, /*input_batch_size_limit=*/100,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 1;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 1;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
mixed_priority_batching_policy();
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
EXPECT_THAT(
ScheduleTask(10, queue.get(),
tsl::criticality::Criticality::kSheddablePlus),
testing::StatusIs(
absl::StatusCode::kUnavailable,
HasSubstr(
"The low priority task queue to which this task was submitted "
"has max_execution_batch_size=1 and the task size is 10")));
}
EXPECT_FALSE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityTest,
InvalidLowPriorityTaskWithQueueFullWithPriorityQueueEnabledNew) {
Notification processing, proceed;
auto queue_callback = [&processing, &proceed](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
if (!processing.HasBeenNotified()) {
processing.Notify();
}
proceed.WaitForNotification();
};
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(1);
QueueOptions queue_options = CreateQueueOptions(
      /*max_execution_batch_size=*/100, /*input_batch_size_limit=*/100,
      /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
      /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 10;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
mixed_priority_batching_policy();
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(5, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
processing.WaitForNotification();
ASSERT_EQ(0, queue->NumEnqueuedTasks());
TF_ASSERT_OK(ScheduleTask(10, queue.get(),
tsl::criticality::Criticality::kSheddablePlus));
ASSERT_EQ(1, queue->NumEnqueuedTasks());
TF_ASSERT_OK(ScheduleTask(10, queue.get(),
tsl::criticality::Criticality::kSheddablePlus));
ASSERT_EQ(2, queue->NumEnqueuedTasks());
EXPECT_THAT(
ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kSheddablePlus),
testing::StatusIs(
absl::StatusCode::kUnavailable,
HasSubstr("The low priority task queue to which this task was "
"submitted does not have the capcity to handle this task; "
"currently the low priority queue has 20 tasks enqueued "
"and the submitted task size is 1 while "
"max_enqueued_batches=2 and max_execution_batch_size=10")));
proceed.Notify();
}
TEST_P(SharedBatchSchedulerPriorityTest,
CallbackWithTaskVectorOkWithPriorityQueueDisabledWithPrioritySet) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
EXPECT_EQ(0, tasks.size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
const QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/false);
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(5, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityTest,
LowPriorityTaskOnlyAtMaxBatchSizeWithPriorityQueueEnabled) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
EXPECT_TRUE(tasks.empty());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/100, /*input_batch_size_limit=*/100,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 9;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
mixed_priority_batching_policy();
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kSheddablePlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddablePlus));
TF_ASSERT_OK(ScheduleTask(5, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityTest,
LowPriorityTaskOnlyAtTimeoutWithPriorityQueueEnabled) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(3, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(5, batch->task(2).size());
EXPECT_TRUE(tasks.empty());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/100, /*input_batch_size_limit=*/100,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 20;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
mixed_priority_batching_policy();
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kSheddablePlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddablePlus));
TF_ASSERT_OK(ScheduleTask(5, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
INSTANTIATE_TEST_SUITE_P(
Parameter, SharedBatchSchedulerPriorityTest,
::testing::Values(
std::make_tuple(
true,
MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize),
std::make_tuple(true,
MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithNextAllowedBatchSize),
std::make_tuple(
false,
MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize),
std::make_tuple(false,
MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithNextAllowedBatchSize),
std::make_tuple(
false,
MixedPriorityBatchingPolicy::kPriorityIsolation),
std::make_tuple(false,
MixedPriorityBatchingPolicy::kPriorityIsolation)));
using SharedBatchSchedulerPriorityPolicyTest = SharedBatchSchedulerTest;
TEST_P(SharedBatchSchedulerPriorityPolicyTest,
HighPriorityBatchPaddedUptoMaxBatchSize) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
if (queue_callback_called) return;
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(2, tasks.size());
EXPECT_EQ(3, tasks[0]->size());
EXPECT_EQ(3, tasks[1]->size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 10;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize;
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityPolicyTest,
HighPriorityBatchPaddedUptoMaxAvailableBatchSize) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(1, tasks.size());
EXPECT_EQ(3, tasks[0]->size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 10;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize;
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityPolicyTest,
HighPriorityBatchPaddedUptoNextAllowedBatchSize) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
if (queue_callback_called) return;
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(2, tasks.size());
EXPECT_EQ(2, tasks[0]->size());
EXPECT_EQ(2, tasks[1]->size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.allowed_batch_sizes = {2, 8, 16};
queue_options.low_priority_queue_options.max_execution_batch_size = 10;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy = MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithNextAllowedBatchSize;
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(2, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(2, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(2, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityPolicyTest,
HighPriorityBatchNotPaddedWhenAllowedBatchSizeMissing) {
bool queue_callback_called = false;
auto queue_callback = [&queue_callback_called](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
if (queue_callback_called) return;
queue_callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
EXPECT_EQ(0, tasks.size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 10;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy = MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithNextAllowedBatchSize;
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(2, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(2, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(2, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_TRUE(queue_callback_called);
}
TEST_P(SharedBatchSchedulerPriorityPolicyTest,
HighPriorityBatchNotPaddedWithLowPriorityTasks) {
int queue_callback_counter = 0;
auto queue_callback = [&queue_callback_counter](
std::unique_ptr<Batch<FakeTask>> batch,
std::vector<std::unique_ptr<FakeTask>> tasks) {
if (queue_callback_counter++ == 0) {
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(1, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
return;
}
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(3, batch->task(0).size());
EXPECT_EQ(3, batch->task(1).size());
};
{
std::shared_ptr<Scheduler> scheduler =
CreateSharedBatchScheduler(3);
QueueOptions queue_options = CreateQueueOptions(
        /*max_execution_batch_size=*/10, /*input_batch_size_limit=*/10,
        /*batch_timeout_micros=*/1 * 1000 * 1000, /*max_enqueued_batches=*/2,
        /*enable_priority_queue=*/true);
queue_options.low_priority_queue_options.max_execution_batch_size = 10;
queue_options.low_priority_queue_options.batch_timeout_micros =
1 * 1000 * 1000;
queue_options.low_priority_queue_options.input_batch_size_limit = 10;
queue_options.low_priority_queue_options.max_enqueued_batches = 2;
queue_options.mixed_priority_batching_policy =
MixedPriorityBatchingPolicy::kPriorityIsolation;
std::unique_ptr<Queue> queue =
CreateQueue(scheduler, queue_options, queue_callback);
TF_ASSERT_OK(ScheduleTask(1, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kCriticalPlus));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddable));
TF_ASSERT_OK(ScheduleTask(3, queue.get(),
tsl::criticality::Criticality::kSheddable));
}
EXPECT_EQ(queue_callback_counter, 2);
}
INSTANTIATE_TEST_SUITE_P(Parameter, SharedBatchSchedulerPriorityPolicyTest,
::testing::Bool());
#ifdef PLATFORM_GOOGLE
static std::vector<std::unique_ptr<Queue>>* queues =
new std::vector<std::unique_ptr<Queue>>();
static std::vector<std::string>* queue_labels = new std::vector<std::string>();
void CreateQueues() {
auto split_func_for_size_one_task =
[](std::unique_ptr<FakeTask>* input_task, int open_batch_remaining_slot,
int max_batch_size,
std::vector<std::unique_ptr<FakeTask>>* output_tasks) -> Status {
output_tasks->push_back(std::move(*input_task));
Notification notify;
std::thread busy_waiter([&] {
while (!notify.HasBeenNotified()) {
}
});
std::thread notifier([&] {
Env::Default()->SleepForMicroseconds(1);
notify.Notify();
});
busy_waiter.join();
notifier.join();
return absl::OkStatus();
};
internal::Queue<FakeTask>::ProcessBatchCallback process_batch_callback =
[](std::unique_ptr<Batch<FakeTask>> task) {
};
const size_t max_execution_batch_size = 64;
const size_t input_batch_size_limit = 128;
const size_t batch_timeout_micros = 10;
queues->push_back(CreateQueue(
CreateSharedBatchScheduler(5),
CreateQueueOptions(max_execution_batch_size, input_batch_size_limit,
                         batch_timeout_micros, /*max_enqueued_batches=*/INT_MAX,
                         /*enable_large_batch_splitting=*/true,
split_func_for_size_one_task),
process_batch_callback));
queue_labels->push_back(std::string("EagerSplit"));
queues->push_back(CreateQueue(
CreateSharedBatchScheduler(5),
CreateQueueOptions(max_execution_batch_size, input_batch_size_limit,
                         batch_timeout_micros, /*max_enqueued_batches=*/INT_MAX,
                         /*enable_large_batch_splitting=*/false,
                         /*split_func=*/nullptr),
process_batch_callback));
queue_labels->push_back(std::string("NoSplit"));
}
void BM_QueueSchedule(::testing::benchmark::State& state) {
static absl::once_flag once;
absl::call_once(once, []() { CreateQueues(); });
const int queue_index = state.range(1);
Queue* queue = (*queues)[queue_index].get();
const string label = strings::StrCat(state.threads(), "-Threads",
(*queue_labels)[queue_index]);
state.SetLabel(label);
for (auto s : state) {
for (int i = 0; i < state.range(0); i++) {
auto batch_task = std::make_unique<FakeTask>(1);
auto status = queue->Schedule(&batch_task);
tensorflow::testing::DoNotOptimize(status);
}
}
}
BENCHMARK(BM_QueueSchedule)->Apply([](benchmark::internal::Benchmark* b) {
b->ThreadRange(1,
port::NumSchedulableCPUs() * tensorflow::port::CPUIDNumSMT());
for (int queue_index : {0, 1, 2}) {
b->ArgPair(10000, queue_index);
}
});
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/shared_batch_scheduler.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/shared_batch_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93a4e78f-e7b6-439c-a9ab-8ad84529ba55 | cpp | tensorflow/tensorflow | batch_stats | tensorflow/core/kernels/batching_util/batch_stats.h | tensorflow/core/kernels/batching_util/batch_stats_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_STATS_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_STATS_H_
#include <atomic>
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "absl/container/node_hash_map.h"
#include "absl/time/time.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow::serving {
constexpr int64_t kNumBatchThreadsUnknown = -1;
constexpr int64_t kBatchTimeoutMicrosUnknown = -1;
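// Thread-safe accumulator of cost samples (count and sum) from which the mean
// cost can be queried; mean() is std::nullopt until at least one sample has
// been registered.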
class CostTracker {
public:
void Register(absl::Duration cost) {
DCHECK_GT(cost, absl::ZeroDuration());
mutex_lock l(mu_);
sample_count_++;
sample_sum_ += cost;
};
std::optional<absl::Duration> mean() const {
int64_t count;
absl::Duration sum;
{
mutex_lock l(mu_);
count = sample_count_;
sum = sample_sum_;
}
if (count == 0) return std::nullopt;
return sum / count;
};
private:
mutable mutex mu_;
int64_t sample_count_ TF_GUARDED_BY(mu_) = 0;
absl::Duration sample_sum_ TF_GUARDED_BY(mu_);
};
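// Statistics tracked for one batch size. Currently only the TPU cost tracker.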
class BatchSizeStats {
public:
CostTracker& tpu_cost() { return tpu_cost_; };
private:
CostTracker tpu_cost_;
};
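// Statistics for one (model, op) pair: per-batch-size cost trackers, the
// cumulative processed size, and the batching parameters last reported for the
// model (number of batch threads and batch timeout).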
class ModelBatchStats {
public:
BatchSizeStats& batch_size(int32 batch_size) {
mutex_lock l(mu_);
return batch_size_stats_by_batch_size_[batch_size];
}
void RegisterProcessedSize(int64_t size) {
cumulative_processed_size_.fetch_add(size, std::memory_order_relaxed);
}
int64_t cumulative_processed_size() const {
return cumulative_processed_size_.load(std::memory_order_relaxed);
}
std::vector<int32> BatchSizes() const {
std::vector<int32> result;
mutex_lock l(mu_);
result.reserve(batch_size_stats_by_batch_size_.size());
for (const auto& [key, value] : batch_size_stats_by_batch_size_) {
result.push_back(key);
}
return result;
}
void SetNumBatchThreads(int64_t num_batch_threads) {
num_batch_threads_.store(num_batch_threads, std::memory_order_relaxed);
}
int64_t num_batch_threads() const {
return num_batch_threads_.load(std::memory_order_relaxed);
}
void SetBatchTimeoutMicros(int64_t batch_timeout_micros) {
batch_timeout_micros_.store(batch_timeout_micros,
std::memory_order_relaxed);
}
int64_t batch_timeout_micros() const {
return batch_timeout_micros_.load(std::memory_order_relaxed);
}
private:
mutable mutex mu_;
absl::node_hash_map<int32, BatchSizeStats> batch_size_stats_by_batch_size_
TF_GUARDED_BY(mu_);
std::atomic<int64_t> cumulative_processed_size_ = 0;
std::atomic<int64_t> num_batch_threads_ = kNumBatchThreadsUnknown;
std::atomic<int64_t> batch_timeout_micros_ = kBatchTimeoutMicrosUnknown;
};
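// Registry of ModelBatchStats keyed by (model name, op name). Entries are
// created on first access; node_hash_map keeps returned references stable.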
class BatchStatsRegistry {
public:
ModelBatchStats& model(const std::string& model_name,
const std::string& op_name) {
std::tuple key(model_name, op_name);
mutex_lock l(mu_);
return model_batch_stats_by_model_and_op_names_[key];
}
std::vector<std::tuple<std::string, std::string>> ModelAndOpNames() const {
std::vector<std::tuple<std::string, std::string>> result;
mutex_lock l(mu_);
result.reserve(model_batch_stats_by_model_and_op_names_.size());
for (const auto& [key, value] : model_batch_stats_by_model_and_op_names_) {
result.push_back(key);
}
return result;
}
private:
mutable mutex mu_;
absl::node_hash_map<std::tuple<std::string, std::string>, ModelBatchStats>
model_batch_stats_by_model_and_op_names_ TF_GUARDED_BY(mu_);
};
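// Returns the process-wide BatchStatsRegistry singleton.
//
// Illustrative usage sketch (not part of the original file; the model and op
// names are made up):
//
//   ModelBatchStats& stats =
//       GlobalBatchStatsRegistry().model("my_model", "BatchFunction");
//   stats.RegisterProcessedSize(32);
//   stats.batch_size(32).tpu_cost().Register(absl::Milliseconds(7));
//   std::optional<absl::Duration> mean = stats.batch_size(32).tpu_cost().mean();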
inline BatchStatsRegistry& GlobalBatchStatsRegistry() {
static BatchStatsRegistry* instance = new BatchStatsRegistry();
return *instance;
}
}
#endif | #include "tensorflow/core/kernels/batching_util/batch_stats.h"
#include <tuple>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow::serving {
namespace {
using ::testing::UnorderedElementsAre;
TEST(BatchStatsTest, GlobalBatchStatsRegistryAlwaysReturnsTheSameInstance) {
ASSERT_EQ(&GlobalBatchStatsRegistry(), &GlobalBatchStatsRegistry());
}
TEST(BatchStatsTest, BasicOperation) {
BatchStatsRegistry stats;
stats.model( "m", "o")
.batch_size(1)
.tpu_cost()
.Register(absl::Hours(5));
ASSERT_EQ(stats.model( "m", "o")
.batch_size(1)
.tpu_cost()
.mean(),
absl::Hours(5));
}
TEST(BatchStatsTest, ModelBatchStatsAreUniqueForEachModel) {
BatchStatsRegistry stats;
ASSERT_NE(&stats.model( "m", "o"),
&stats.model( "m", "o2"));
}
TEST(BatchStatsTest, BatchSizeStatsAreUniqueForEachBatchSize) {
ModelBatchStats stats;
ASSERT_NE(&stats.batch_size(1), &stats.batch_size(2));
}
TEST(BatchStatsTest, CostTrackerStartsWithNoMean) {
CostTracker tracker;
ASSERT_FALSE(tracker.mean().has_value());
}
TEST(BatchStatsTest, CostTrackerMeanIsCorrect) {
CostTracker tracker;
tracker.Register(absl::Hours(5));
tracker.Register(absl::Hours(7));
ASSERT_EQ(*tracker.mean(), absl::Hours(6));
}
TEST(BatchStatsTest, ProcessedSizeIsCorrect) {
ModelBatchStats stats;
stats.RegisterProcessedSize(5);
stats.RegisterProcessedSize(7);
ASSERT_EQ(stats.cumulative_processed_size(), 12);
}
TEST(BatchStatsTest, ModelOpNamesAreCorrect) {
BatchStatsRegistry stats;
stats.model( "m", "o")
.batch_size(1)
.tpu_cost()
.Register(absl::Hours(5));
stats.model( "m2", "o")
.batch_size(1)
.tpu_cost()
.Register(absl::Hours(7));
stats.model( "m", "o")
.batch_size(2)
.tpu_cost()
.Register(absl::Hours(4));
stats.model( "m", "o2")
.batch_size(1)
.tpu_cost()
.Register(absl::Hours(1));
ASSERT_THAT(stats.ModelAndOpNames(),
UnorderedElementsAre(
std::tuple( "m", "o"),
std::tuple( "m", "o2"),
std::tuple( "m2", "o")));
}
TEST(BatchStatsTest, BatchSizesAreCorrect) {
ModelBatchStats stats;
stats.batch_size(1).tpu_cost().Register(absl::Hours(5));
stats.batch_size(4).tpu_cost().Register(absl::Hours(7));
stats.batch_size(1).tpu_cost().Register(absl::Hours(4));
stats.batch_size(2).tpu_cost().Register(absl::Hours(1));
ASSERT_THAT(stats.BatchSizes(), UnorderedElementsAre(1, 2, 4));
}
TEST(BatchStatsTest, BatchTimeoutIsCorrect) {
ModelBatchStats stats;
ASSERT_EQ(stats.batch_timeout_micros(), -1);
stats.SetBatchTimeoutMicros(100);
ASSERT_EQ(stats.batch_timeout_micros(), 100);
}
TEST(BatchStatsTest, NumBatchThreadsIsCorrect) {
ModelBatchStats stats;
ASSERT_EQ(stats.num_batch_threads(), -1);
stats.SetNumBatchThreads(16);
ASSERT_EQ(stats.num_batch_threads(), 16);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_stats.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1c01a02e-63c2-4cb8-9d8e-94ac3c67739d | cpp | tensorflow/tensorflow | batch_input_task | tensorflow/core/kernels/batching_util/batch_input_task.h | tensorflow/core/kernels/batching_util/batch_input_task_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_INPUT_TASK_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_INPUT_TASK_H_
#include <algorithm>
#include <atomic>
#include <functional>
#include <memory>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/container/fixed_array.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/concat_split_util.h"
#include "tensorflow/core/kernels/batching_util/input_split_metadata.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/util/incremental_barrier.h"
namespace tensorflow {
namespace serving {
namespace internal {
template <typename TaskType>
class BatchInputTaskHandleTestAccess;
template <typename TaskType>
class BatchInputTaskTestAccess;
template <typename TaskType>
class BatchInputTask;
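// A BatchTask standing for one slice of a larger input task. GetSplitTask()
// materializes the slice at most once (guarded by `once_`); size() reports the
// slice size without forcing the split.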
template <typename TaskType>
class BatchInputTaskHandle : public BatchTask {
public:
BatchInputTaskHandle(
std::shared_ptr<BatchInputTask<TaskType>> batch_input_task, int split_id,
size_t task_size);
std::unique_ptr<TaskType> GetSplitTask();
size_t size() const override { return task_size_; }
private:
template <typename T>
friend class internal::BatchInputTaskHandleTestAccess;
int split_id() const { return split_id_; }
std::shared_ptr<BatchInputTask<TaskType>> batch_input_task_;
const int split_id_;
const size_t task_size_;
std::atomic<bool> once_{false};
};
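// Wraps one oversized input task and lazily splits it into per-batch slices.
// ToTaskHandles() eagerly computes the slice sizes via InputSplitMetadata and
// hands out one handle per slice; the actual split runs at most once, on the
// first GetSplitTask() call.
//
// Illustrative usage sketch (not part of the original file):
//
//   auto batch_input_task = std::make_shared<BatchInputTask<TaskType>>(
//       std::move(input_task), open_batch_remaining_slot, batch_size_limit,
//       split_func);
//   std::vector<std::unique_ptr<BatchInputTaskHandle<TaskType>>> handles;
//   batch_input_task->ToTaskHandles(&handles);
//   std::unique_ptr<TaskType> first_slice = handles[0]->GetSplitTask();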
template <typename TaskType>
class BatchInputTask
: public std::enable_shared_from_this<BatchInputTask<TaskType>> {
public:
using SplitInputFunc = std::function<Status(
std::unique_ptr<TaskType>* input_task, int first_output_task_size,
int input_batch_size_limit,
std::vector<std::unique_ptr<TaskType>>* output_tasks)>;
BatchInputTask(std::unique_ptr<TaskType> input_task,
int open_batch_remaining_slot, int batch_size_limit,
SplitInputFunc split_input_func);
void ToTaskHandles(
std::vector<std::unique_ptr<BatchInputTaskHandle<TaskType>>>*
output_task_handles);
private:
friend class BatchInputTaskHandle<TaskType>;
template <typename T>
friend class internal::BatchInputTaskTestAccess;
std::unique_ptr<TaskType> GetSplitTask(int split_id);
Status SplitBatches(std::vector<std::unique_ptr<TaskType>>* output_tasks);
std::unique_ptr<TaskType> input_task_;
const int input_task_size_ = 0;
const int open_batch_remaining_slot_;
const int batch_size_limit_;
const SplitInputFunc split_func_;
const InputSplitMetadata input_split_metadata_;
mutable absl::once_flag once_;
std::vector<std::unique_ptr<TaskType>> task_splits_;
Status split_status_;
};
template <typename TaskType>
BatchInputTaskHandle<TaskType>::BatchInputTaskHandle(
std::shared_ptr<BatchInputTask<TaskType>> batch_input_task, int split_id,
size_t task_size)
: batch_input_task_(batch_input_task),
split_id_(split_id),
task_size_(task_size) {}
template <typename TaskType>
std::unique_ptr<TaskType> BatchInputTaskHandle<TaskType>::GetSplitTask() {
if (once_.load(std::memory_order_acquire)) {
return nullptr;
}
once_.store(true, std::memory_order_release);
return batch_input_task_->GetSplitTask(split_id_);
}
template <typename TaskType>
BatchInputTask<TaskType>::BatchInputTask(std::unique_ptr<TaskType> input_task,
int open_batch_remaining_slot,
int batch_size_limit,
SplitInputFunc split_input_func)
: input_task_(std::move(input_task)),
input_task_size_(input_task_->size()),
open_batch_remaining_slot_(open_batch_remaining_slot),
batch_size_limit_(batch_size_limit),
split_func_(split_input_func),
input_split_metadata_(input_task_size_, open_batch_remaining_slot,
batch_size_limit) {}
template <typename TaskType>
void BatchInputTask<TaskType>::ToTaskHandles(
std::vector<std::unique_ptr<BatchInputTaskHandle<TaskType>>>*
task_handles) {
const absl::FixedArray<int>& task_sizes = input_split_metadata_.task_sizes();
task_handles->resize(task_sizes.size());
for (int i = 0; i < task_handles->size(); i++) {
(*task_handles)[i] = std::make_unique<BatchInputTaskHandle<TaskType>>(
this->shared_from_this(), i, task_sizes[i]);
}
}
template <typename TaskType>
std::unique_ptr<TaskType> BatchInputTask<TaskType>::GetSplitTask(int split_id) {
absl::call_once(once_,
[this]() { split_status_ = SplitBatches(&task_splits_); });
if (!split_status_.ok()) {
    LOG_EVERY_N_SEC(WARNING, 60)
<< "Split task with error: " << split_status_ << " split metadata is "
<< input_split_metadata_.DebugString();
return nullptr;
}
if (split_id >= 0 && split_id < task_splits_.size()) {
return std::move(task_splits_[split_id]);
}
return nullptr;
}
template <typename TaskType>
Status BatchInputTask<TaskType>::SplitBatches(
std::vector<std::unique_ptr<TaskType>>* output_tasks) {
return split_func_(&input_task_, open_batch_remaining_slot_,
batch_size_limit_, output_tasks);
}
}
}
}
#endif | #include "tensorflow/core/kernels/batching_util/batch_input_task.h"
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_properties.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/batching_util/batch_resource_base.h"
#include "tensorflow/core/kernels/batching_util/input_split_metadata.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace serving {
namespace internal {
template <typename TaskType>
class BatchInputTaskHandleTestAccess {
public:
explicit BatchInputTaskHandleTestAccess(
BatchInputTaskHandle<TaskType>* handle)
: handle_(handle) {}
int split_id() const { return handle_->split_id(); }
private:
BatchInputTaskHandle<TaskType>* const handle_;
};
namespace {
using TensorMatrix = std::vector<std::vector<Tensor>>;
using SplitFunc = std::function<Status(
std::unique_ptr<BatchResourceBase::BatchTask>* input_task,
int first_output_task_size, int input_batch_size_limit,
std::vector<std::unique_ptr<BatchResourceBase::BatchTask>>* output_tasks)>;
template <typename T>
static Tensor CreateTensor(const TensorShape& input_shape,
gtl::ArraySlice<T> input_data) {
Tensor tensor(DataTypeToEnum<T>::value, input_shape);
test::FillValues<T>(&tensor, input_data);
return tensor;
}
NodeDef CreateBatchKernelNodeDef() {
NodeDef batch_kernel_node_def;
NodeDefBuilder batch_function_builder("BatchTPUInput", "BatchFunction");
batch_function_builder.Attr("max_batch_size", 128);
batch_function_builder.Attr("num_batch_threads", 8);
batch_function_builder.Attr("allowed_batch_sizes", {2, 4, 8});
batch_function_builder.Attr("batch_timeout_micros", 1000);
batch_function_builder.Attr("max_enqueued_batches", 100);
batch_function_builder.Attr("enable_large_batch_splitting", true);
std::vector<DataType> input_dtypes({DataType::DT_INT64, DataType::DT_INT64});
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.resize(2);
inputs[0] = NodeDefBuilder::NodeOut({"n1", 0, DataType::DT_INT64});
inputs[1] = NodeDefBuilder::NodeOut({"n2", 1, DataType::DT_INT64});
batch_function_builder.Attr("Tin", input_dtypes);
batch_function_builder.Input(inputs);
batch_function_builder.Attr("Tcaptured",
std::vector<DataType>{DataType::DT_INT64});
batch_function_builder.Input(std::vector<NodeDefBuilder::NodeOut>{
NodeDefBuilder::NodeOut({"n3", 1, DataType::DT_INT64})});
batch_function_builder.Attr("Tout",
std::vector<DataType>(4, DataType::DT_INT64));
NameAttrList f;
f.set_name("func_to_batch");
batch_function_builder.Attr("f", f);
TF_CHECK_OK(batch_function_builder.Finalize(&batch_kernel_node_def));
return batch_kernel_node_def;
}
class BatchInputTaskTest : public ::testing::Test {
protected:
BatchInputTaskTest() {
device_ = DeviceFactory::NewDevice("CPU", SessionOptions{},
"/job:a/replica:0/task:0");
Status op_kernel_creation_status;
batch_kernel_ = CreateOpKernel(
DEVICE_CPU, device_.get(), device_->GetAllocator(AllocatorAttributes{}),
CreateBatchKernelNodeDef(), TF_GRAPH_DEF_VERSION,
&op_kernel_creation_status);
TF_CHECK_OK(op_kernel_creation_status);
EXPECT_NE(batch_kernel_, nullptr);
op_kernel_context_params_.device = device_.get();
op_kernel_context_params_.op_kernel = batch_kernel_.get();
op_kernel_context_ = std::make_unique<OpKernelContext>(
        &op_kernel_context_params_, 4 /* num_outputs */);
}
OpKernelContext* op_kernel_context() const {
return op_kernel_context_.get();
}
private:
std::unique_ptr<Device> device_;
std::unique_ptr<OpKernel> batch_kernel_;
OpKernelContext::Params op_kernel_context_params_;
std::unique_ptr<OpKernelContext> op_kernel_context_;
};
TEST_F(BatchInputTaskTest, BatchInputToSplitTasks) {
auto batch_task = std::make_unique<BatchResourceBase::BatchTask>();
batch_task->inputs.push_back(CreateTensor<int64_t>(
TensorShape({5, 2, 1}), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}));
batch_task->inputs.push_back(CreateTensor<int64_t>(
TensorShape({5, 1, 2}), {11, 12, 13, 14, 15, 16, 17, 18, 19, 20}));
batch_task->captured_inputs.push_back(
CreateTensor<int64_t>(TensorShape{1}, {0}));
batch_task->context = op_kernel_context();
bool batch_task_done_callback_executed = false;
batch_task->output = std::make_shared<TensorMatrix>();
batch_task->status = std::make_shared<ThreadSafeStatus>();
batch_task->done_callback = [&batch_task_done_callback_executed]() {
batch_task_done_callback_executed = true;
};
auto batch_input_task =
std::make_shared<BatchInputTask<BatchResourceBase::BatchTask>>(
            std::move(batch_task), /*open_batch_remaining_slot=*/1,
            /*batch_size_limit=*/3, BatchResourceBase::SplitInputTask);
std::vector<
std::unique_ptr<BatchInputTaskHandle<BatchResourceBase::BatchTask>>>
output_tasks;
batch_input_task->ToTaskHandles(&output_tasks);
ASSERT_FALSE(batch_task_done_callback_executed);
const std::vector<int> expected_task_sizes{1, 3, 1};
for (int i = 0; i < output_tasks.size(); i++) {
EXPECT_EQ(
internal::BatchInputTaskHandleTestAccess<BatchResourceBase::BatchTask>(
output_tasks[i].get())
.split_id(),
i);
auto batch_task = output_tasks[i]->GetSplitTask();
ASSERT_NE(batch_task, nullptr);
EXPECT_EQ(batch_task->size(), expected_task_sizes[i]);
batch_task->done_callback();
EXPECT_EQ(output_tasks[i]->GetSplitTask(), nullptr);
}
ASSERT_TRUE(batch_task_done_callback_executed);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_input_task.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_input_task_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
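A minimal usage sketch of the BatchInputTask / BatchInputTaskHandle pair above. SketchTask, ChunkSplit, and the chosen sizes are illustrative assumptions standing in for BatchResourceBase::BatchTask and BatchResourceBase::SplitInputTask; the namespace nesting simply mirrors the unit test, and the snippet only shows the lazy-split flow that test exercises.
#include <algorithm>
#include <memory>
#include <vector>
#include "tensorflow/core/kernels/batching_util/batch_input_task.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
namespace tensorflow {
namespace serving {
namespace internal {
namespace {
// Hypothetical task type: only size() matters to BatchInputTask.
struct SketchTask : public BatchTask {
  explicit SketchTask(size_t n) : n(n) {}
  size_t size() const override { return n; }
  size_t n;
};
// Simplified split: the first chunk fills the open batch, later chunks respect
// the batch size limit (a stand-in for BatchResourceBase::SplitInputTask).
Status ChunkSplit(std::unique_ptr<SketchTask>* input,
                  int open_batch_remaining_slot, int batch_size_limit,
                  std::vector<std::unique_ptr<SketchTask>>* output_tasks) {
  int remaining = static_cast<int>((*input)->size());
  bool first = true;
  while (remaining > 0) {
    const int chunk = (first && open_batch_remaining_slot > 0)
                          ? std::min(remaining, open_batch_remaining_slot)
                          : std::min(remaining, batch_size_limit);
    first = false;
    output_tasks->push_back(std::make_unique<SketchTask>(chunk));
    remaining -= chunk;
  }
  return absl::OkStatus();
}
void LazySplitSketch() {
  // A task of size 5 split against an open batch with 1 free slot and a
  // batch size limit of 3 yields handles of sizes {1, 3, 1}.
  auto input_task = std::make_shared<BatchInputTask<SketchTask>>(
      std::make_unique<SketchTask>(5), /*open_batch_remaining_slot=*/1,
      /*batch_size_limit=*/3, ChunkSplit);
  std::vector<std::unique_ptr<BatchInputTaskHandle<SketchTask>>> handles;
  input_task->ToTaskHandles(&handles);
  for (auto& handle : handles) {
    // The split itself runs lazily on the first GetSplitTask() call.
    std::unique_ptr<SketchTask> split = handle->GetSplitTask();
  }
}
}  // namespace
}  // namespace internal
}  // namespace serving
}  // namespace tensorflow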
106e29d3-9c69-48ae-b424-79984c7aed25 | cpp | tensorflow/tensorflow | basic_batch_scheduler | tensorflow/core/kernels/batching_util/basic_batch_scheduler.h | tensorflow/core/kernels/batching_util/basic_batch_scheduler_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BASIC_BATCH_SCHEDULER_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BASIC_BATCH_SCHEDULER_H_
#include <stddef.h>
#include <cstddef>
#include <functional>
#include <memory>
#include <string>
#include "tensorflow/core/kernels/batching_util/shared_batch_scheduler.h"
namespace tensorflow {
namespace serving {
template <typename TaskType>
class BasicBatchScheduler : public BatchScheduler<TaskType> {
public:
struct Options {
string thread_pool_name = {"batch_threads"};
int num_batch_threads = port::MaxParallelism();
std::shared_ptr<SharedBatchScheduler<TaskType>> shared_batch_scheduler =
nullptr;
int max_batch_size = 1000;
int64_t batch_timeout_micros = 0;
int max_enqueued_batches = 10;
bool enable_large_batch_splitting = false;
std::function<Status(std::unique_ptr<TaskType>* input_task,
int first_output_task_size, int input_batch_size_limit,
std::vector<std::unique_ptr<TaskType>>* output_tasks)>
split_input_task_func;
int max_execution_batch_size = 10;
Env* env = Env::Default();
};
static Status Create(const Options& options,
std::function<void(std::unique_ptr<Batch<TaskType>>)>
process_batch_callback,
std::unique_ptr<BasicBatchScheduler>* scheduler);
~BasicBatchScheduler() override = default;
Status Schedule(std::unique_ptr<TaskType>* task) override;
size_t NumEnqueuedTasks() const override;
size_t SchedulingCapacity() const override;
size_t max_task_size() const override {
return shared_scheduler_queue_->max_task_size();
}
private:
explicit BasicBatchScheduler(
std::unique_ptr<BatchScheduler<TaskType>> shared_scheduler_queue);
std::unique_ptr<BatchScheduler<TaskType>> shared_scheduler_queue_;
BasicBatchScheduler(const BasicBatchScheduler&) = delete;
void operator=(const BasicBatchScheduler&) = delete;
};
template <typename TaskType>
Status BasicBatchScheduler<TaskType>::Create(
const Options& options,
std::function<void(std::unique_ptr<Batch<TaskType>>)>
process_batch_callback,
std::unique_ptr<BasicBatchScheduler>* scheduler) {
std::shared_ptr<SharedBatchScheduler<TaskType>> shared_scheduler;
if (options.shared_batch_scheduler == nullptr) {
typename SharedBatchScheduler<TaskType>::Options shared_scheduler_options;
shared_scheduler_options.thread_pool_name = options.thread_pool_name;
shared_scheduler_options.num_batch_threads = options.num_batch_threads;
shared_scheduler_options.env = options.env;
TF_RETURN_IF_ERROR(SharedBatchScheduler<TaskType>::Create(
shared_scheduler_options, &shared_scheduler));
} else {
shared_scheduler = options.shared_batch_scheduler;
}
typename SharedBatchScheduler<TaskType>::QueueOptions
shared_scheduler_queue_options;
shared_scheduler_queue_options.input_batch_size_limit =
options.max_batch_size;
shared_scheduler_queue_options.batch_timeout_micros =
options.batch_timeout_micros;
shared_scheduler_queue_options.max_enqueued_batches =
options.max_enqueued_batches;
shared_scheduler_queue_options.enable_large_batch_splitting =
options.enable_large_batch_splitting;
shared_scheduler_queue_options.split_input_task_func =
options.split_input_task_func;
shared_scheduler_queue_options.max_execution_batch_size =
options.max_execution_batch_size;
std::unique_ptr<BatchScheduler<TaskType>> shared_scheduler_queue;
TF_RETURN_IF_ERROR(shared_scheduler->AddQueue(shared_scheduler_queue_options,
process_batch_callback,
&shared_scheduler_queue));
scheduler->reset(
new BasicBatchScheduler<TaskType>(std::move(shared_scheduler_queue)));
return absl::OkStatus();
}
template <typename TaskType>
Status BasicBatchScheduler<TaskType>::Schedule(
std::unique_ptr<TaskType>* task) {
return shared_scheduler_queue_->Schedule(task);
}
template <typename TaskType>
size_t BasicBatchScheduler<TaskType>::NumEnqueuedTasks() const {
return shared_scheduler_queue_->NumEnqueuedTasks();
}
template <typename TaskType>
size_t BasicBatchScheduler<TaskType>::SchedulingCapacity() const {
return shared_scheduler_queue_->SchedulingCapacity();
}
template <typename TaskType>
BasicBatchScheduler<TaskType>::BasicBatchScheduler(
std::unique_ptr<BatchScheduler<TaskType>> shared_scheduler_queue)
: shared_scheduler_queue_(std::move(shared_scheduler_queue)) {}
}
}
#endif | #include "tensorflow/core/kernels/batching_util/basic_batch_scheduler.h"
#include <utility>
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace {
class FakeTask : public BatchTask {
public:
explicit FakeTask(size_t size) : size_(size) {}
~FakeTask() override = default;
size_t size() const override { return size_; }
private:
const size_t size_;
FakeTask(const FakeTask&) = delete;
void operator=(const FakeTask&) = delete;
};
Status ScheduleTask(size_t task_size, BatchScheduler<FakeTask>* scheduler) {
std::unique_ptr<FakeTask> task(new FakeTask(task_size));
Status status = scheduler->Schedule(&task);
CHECK_EQ(status.ok(), task == nullptr);
return status;
}
TEST(BasicBatchSchedulerTest, Basic) {
bool callback_called = false;
auto callback = [&callback_called](std::unique_ptr<Batch<FakeTask>> batch) {
callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(3, batch->task(0).size());
EXPECT_EQ(5, batch->task(1).size());
};
{
BasicBatchScheduler<FakeTask>::Options options;
options.max_batch_size = 10;
options.batch_timeout_micros = 100 * 1000;
options.num_batch_threads = 1;
options.max_enqueued_batches = 3;
std::unique_ptr<BasicBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
BasicBatchScheduler<FakeTask>::Create(options, callback, &scheduler));
EXPECT_EQ(10, scheduler->max_task_size());
EXPECT_EQ(0, scheduler->NumEnqueuedTasks());
EXPECT_EQ(3 * 10, scheduler->SchedulingCapacity());
TF_ASSERT_OK(ScheduleTask(3, scheduler.get()));
EXPECT_EQ(1, scheduler->NumEnqueuedTasks());
EXPECT_EQ((3 * 10) - 3, scheduler->SchedulingCapacity());
TF_ASSERT_OK(ScheduleTask(5, scheduler.get()));
EXPECT_EQ(2, scheduler->NumEnqueuedTasks());
EXPECT_EQ((3 * 10) - (3 + 5), scheduler->SchedulingCapacity());
}
EXPECT_TRUE(callback_called);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/basic_batch_scheduler.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/basic_batch_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
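The test above exercises only the default path. As a sketch of the large-batch-splitting knobs the header exposes (enable_large_batch_splitting, max_execution_batch_size, split_input_task_func), the snippet below wires them together; SplitTask, UnitSplit, and the chosen sizes are illustrative assumptions, not part of the original files.
#include <memory>
#include <vector>
#include "tensorflow/core/kernels/batching_util/basic_batch_scheduler.h"
namespace tensorflow {
namespace serving {
namespace {
// Hypothetical task type mirroring the FakeTask used in the test above.
struct SplitTask : public BatchTask {
  explicit SplitTask(size_t n) : n(n) {}
  size_t size() const override { return n; }
  size_t n;
};
// Trivial stand-in split function: breaks an oversized task into unit tasks.
Status UnitSplit(std::unique_ptr<SplitTask>* input, int first_output_task_size,
                 int input_batch_size_limit,
                 std::vector<std::unique_ptr<SplitTask>>* output_tasks) {
  for (size_t i = 0; i < (*input)->size(); ++i) {
    output_tasks->push_back(std::make_unique<SplitTask>(1));
  }
  return absl::OkStatus();
}
Status MakeSplittingScheduler(
    std::unique_ptr<BasicBatchScheduler<SplitTask>>* scheduler) {
  BasicBatchScheduler<SplitTask>::Options options;
  options.max_batch_size = 100;           // admission limit for input tasks
  options.enable_large_batch_splitting = true;
  options.max_execution_batch_size = 10;  // size of batches handed to callback
  options.split_input_task_func = UnitSplit;
  auto callback = [](std::unique_ptr<Batch<SplitTask>> batch) {
    // With splitting enabled, each batch holds at most
    // max_execution_batch_size elements.
  };
  return BasicBatchScheduler<SplitTask>::Create(options, callback, scheduler);
}
}  // namespace
}  // namespace serving
}  // namespace tensorflow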
8755924a-ced3-48b1-a66e-1a04454a738d | cpp | tensorflow/tensorflow | serial_device_batch_scheduler | tensorflow/core/kernels/batching_util/serial_device_batch_scheduler.h | tensorflow/core/kernels/batching_util/serial_device_batch_scheduler_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_SERIAL_DEVICE_BATCH_SCHEDULER_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_SERIAL_DEVICE_BATCH_SCHEDULER_H_
#include <algorithm>
#include <functional>
#include <memory>
#include <random>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace serving {
namespace internal {
template <typename TaskType>
class SDBSBatch;
template <typename TaskType>
class SDBSQueue;
}
template <typename TaskType>
class SerialDeviceBatchScheduler : public std::enable_shared_from_this<
SerialDeviceBatchScheduler<TaskType>> {
public:
~SerialDeviceBatchScheduler();
struct Options {
string thread_pool_name = {"batch_threads"};
int64_t num_batch_threads = port::NumSchedulableCPUs();
int64_t full_batch_scheduling_boost_micros = 0;
Env* env = Env::Default();
int64_t initial_in_flight_batches_limit = 3;
std::function<int64()> get_pending_on_serial_device;
double target_pending = 2;
int64_t batches_to_average_over = 1000;
};
static Status Create(
const Options& options,
std::shared_ptr<SerialDeviceBatchScheduler<TaskType>>* scheduler);
struct QueueOptions {
int max_batch_size = 1000;
int max_enqueued_batches = 10;
};
using BatchProcessor = std::function<void(std::unique_ptr<Batch<TaskType>>)>;
Status AddQueue(const QueueOptions& options,
BatchProcessor process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue);
double in_flight_batches_limit() {
mutex_lock l(mu_);
return in_flight_batches_limit_;
}
double recent_low_traffic_ratio() {
mutex_lock l(mu_);
return recent_low_traffic_ratio_;
}
private:
friend class internal::SDBSQueue<TaskType>;
explicit SerialDeviceBatchScheduler(const Options& options);
void ProcessBatches();
void AddBatch(const internal::SDBSBatch<TaskType>* batch);
void RemoveQueue(const internal::SDBSQueue<TaskType>* queue);
Env* env() const { return options_.env; }
const Options options_;
std::vector<const internal::SDBSBatch<TaskType>*> batches_ TF_GUARDED_BY(mu_);
std::unordered_map<const internal::SDBSQueue<TaskType>*, BatchProcessor>
queues_and_callbacks_ TF_GUARDED_BY(mu_);
std::unique_ptr<thread::ThreadPool> batch_thread_pool_;
int64_t in_flight_batches_limit_ TF_GUARDED_BY(mu_);
int64_t processing_threads_ TF_GUARDED_BY(mu_) = 0;
int64_t batch_count_ TF_GUARDED_BY(mu_) = 0;
int64_t no_batch_count_ TF_GUARDED_BY(mu_) = 0;
int64_t pending_sum_ = 0;
int64_t batch_latency_sum_ = 0;
int64_t batch_period_micros_ = 0;
double recent_low_traffic_ratio_ = 0;
mutex mu_;
SerialDeviceBatchScheduler(const SerialDeviceBatchScheduler&) = delete;
void operator=(const SerialDeviceBatchScheduler&) = delete;
};
namespace internal {
template <typename TaskType>
class SDBSQueue : public BatchScheduler<TaskType> {
public:
using QueueOptions =
typename SerialDeviceBatchScheduler<TaskType>::QueueOptions;
SDBSQueue(std::shared_ptr<SerialDeviceBatchScheduler<TaskType>> scheduler,
const QueueOptions& options);
~SDBSQueue() override;
Status Schedule(std::unique_ptr<TaskType>* task) override;
size_t NumEnqueuedTasks() const override;
size_t SchedulingCapacity() const override;
void ReleaseBatch(const SDBSBatch<TaskType>* batch);
size_t max_task_size() const override { return options_.max_batch_size; }
private:
std::shared_ptr<SerialDeviceBatchScheduler<TaskType>> scheduler_;
const QueueOptions options_;
SDBSBatch<TaskType>* current_batch_ TF_GUARDED_BY(mu_) = nullptr;
int64_t num_enqueued_batches_ TF_GUARDED_BY(mu_) = 0;
int64_t num_enqueued_tasks_ TF_GUARDED_BY(mu_) = 0;
mutable mutex mu_;
SDBSQueue(const SDBSQueue&) = delete;
void operator=(const SDBSQueue&) = delete;
};
template <typename TaskType>
class SDBSBatch : public Batch<TaskType> {
public:
SDBSBatch(SDBSQueue<TaskType>* queue, int64_t creation_time_micros)
: queue_(queue), creation_time_micros_(creation_time_micros) {}
~SDBSBatch() override {}
SDBSQueue<TaskType>* queue() const { return queue_; }
int64_t creation_time_micros() const { return creation_time_micros_; }
private:
SDBSQueue<TaskType>* queue_;
const int64_t creation_time_micros_;
SDBSBatch(const SDBSBatch&) = delete;
void operator=(const SDBSBatch&) = delete;
};
}
template <typename TaskType>
Status SerialDeviceBatchScheduler<TaskType>::Create(
const Options& options,
std::shared_ptr<SerialDeviceBatchScheduler<TaskType>>* scheduler) {
if (options.num_batch_threads < 1) {
return errors::InvalidArgument("num_batch_threads must be positive; was ",
options.num_batch_threads);
}
if (options.initial_in_flight_batches_limit < 1) {
return errors::InvalidArgument(
"initial_in_flight_batches_limit must be positive; was ",
options.initial_in_flight_batches_limit);
}
if (options.initial_in_flight_batches_limit > options.num_batch_threads) {
return errors::InvalidArgument(
"initial_in_flight_batches_limit (",
options.initial_in_flight_batches_limit,
") should not be larger than num_batch_threads (",
options.num_batch_threads, ")");
}
if (options.full_batch_scheduling_boost_micros < 0) {
return errors::InvalidArgument(
"full_batch_scheduling_boost_micros can't be negative; was ",
options.full_batch_scheduling_boost_micros);
}
if (options.batches_to_average_over < 1) {
return errors::InvalidArgument(
"batches_to_average_over should be "
"greater than or equal to 1; was ",
options.batches_to_average_over);
}
if (options.target_pending <= 0) {
return errors::InvalidArgument(
"target_pending should be larger than zero; was ",
options.target_pending);
}
if (!options.get_pending_on_serial_device) {
return errors::InvalidArgument(
"get_pending_on_serial_device must be "
"specified");
}
scheduler->reset(new SerialDeviceBatchScheduler<TaskType>(options));
return absl::OkStatus();
}
template <typename TaskType>
SerialDeviceBatchScheduler<TaskType>::SerialDeviceBatchScheduler(
const Options& options)
: options_(options),
in_flight_batches_limit_(options.initial_in_flight_batches_limit),
processing_threads_(options.initial_in_flight_batches_limit) {
batch_thread_pool_.reset(new thread::ThreadPool(
env(), options.thread_pool_name, options.num_batch_threads));
for (int i = 0; i < processing_threads_; i++) {
batch_thread_pool_->Schedule(
std::bind(&SerialDeviceBatchScheduler<TaskType>::ProcessBatches, this));
}
}
template <typename TaskType>
SerialDeviceBatchScheduler<TaskType>::~SerialDeviceBatchScheduler() {
{
mutex_lock l(mu_);
processing_threads_ = 0;
}
batch_thread_pool_.reset();
}
template <typename TaskType>
Status SerialDeviceBatchScheduler<TaskType>::AddQueue(
const QueueOptions& options, BatchProcessor process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue) {
if (options.max_batch_size <= 0) {
return errors::InvalidArgument("max_batch_size must be positive; was ",
options.max_batch_size);
}
if (options.max_enqueued_batches <= 0) {
return errors::InvalidArgument(
"max_enqueued_batches must be positive; was ",
options.max_enqueued_batches);
}
internal::SDBSQueue<TaskType>* SDBS_queue_raw;
queue->reset(SDBS_queue_raw = new internal::SDBSQueue<TaskType>(
this->shared_from_this(), options));
mutex_lock l(mu_);
queues_and_callbacks_[SDBS_queue_raw] = process_batch_callback;
return absl::OkStatus();
}
template <typename TaskType>
void SerialDeviceBatchScheduler<TaskType>::AddBatch(
const internal::SDBSBatch<TaskType>* batch) {
mutex_lock l(mu_);
batches_.push_back(batch);
}
template <typename TaskType>
void SerialDeviceBatchScheduler<TaskType>::RemoveQueue(
const internal::SDBSQueue<TaskType>* queue) {
mutex_lock l(mu_);
queues_and_callbacks_.erase(queue);
}
template <typename TaskType>
void SerialDeviceBatchScheduler<TaskType>::ProcessBatches() {
const int64_t kIdleThreadSleepTimeMicros = 1000;
const double kMaxNoBatchRatio = .1;
const double kLowTrafficMovingAverageFactor = .1;
for (;;) {
mu_.lock();
if (processing_threads_ < 1 ||
processing_threads_ > in_flight_batches_limit_) {
processing_threads_--;
mu_.unlock();
break;
}
if (batches_.empty()) {
no_batch_count_++;
int64_t sleep_time = batch_period_micros_ ? batch_period_micros_
: kIdleThreadSleepTimeMicros;
mu_.unlock();
env()->SleepForMicroseconds(sleep_time);
continue;
}
auto best_it = batches_.begin();
double best_score =
(*best_it)->creation_time_micros() -
options_.full_batch_scheduling_boost_micros * (*best_it)->size() /
static_cast<double>((*best_it)->queue()->max_task_size());
for (auto it = batches_.begin() + 1; it != batches_.end(); it++) {
const double score =
(*it)->creation_time_micros() -
options_.full_batch_scheduling_boost_micros * (*it)->size() /
static_cast<double>((*it)->queue()->max_task_size());
if (score < best_score) {
best_score = score;
best_it = it;
}
}
const internal::SDBSBatch<TaskType>* batch = *best_it;
batches_.erase(best_it);
batch->queue()->ReleaseBatch(batch);
auto callback = queues_and_callbacks_[batch->queue()];
mu_.unlock();
int64_t start_time = env()->NowMicros();
callback(std::unique_ptr<Batch<TaskType>>(
const_cast<internal::SDBSBatch<TaskType>*>(batch)));
int64_t end_time = env()->NowMicros();
mu_.lock();
batch_count_++;
batch_latency_sum_ += end_time - start_time;
pending_sum_ += options_.get_pending_on_serial_device();
if (batch_count_ == options_.batches_to_average_over) {
recent_low_traffic_ratio_ *= (1 - kLowTrafficMovingAverageFactor);
if (no_batch_count_ < kMaxNoBatchRatio * batch_count_) {
double avg_pending = pending_sum_ / static_cast<double>(batch_count_);
batch_period_micros_ =
batch_latency_sum_ / batch_count_ / in_flight_batches_limit_;
in_flight_batches_limit_ +=
std::round(options_.target_pending - avg_pending);
in_flight_batches_limit_ =
std::max(in_flight_batches_limit_, int64_t{1});
in_flight_batches_limit_ =
std::min(in_flight_batches_limit_, options_.num_batch_threads);
if (processing_threads_ > 0 &&
processing_threads_ < in_flight_batches_limit_) {
int extra_threads = in_flight_batches_limit_ - processing_threads_;
for (int i = 0; i < extra_threads; i++) {
batch_thread_pool_->Schedule(std::bind(
&SerialDeviceBatchScheduler<TaskType>::ProcessBatches, this));
}
processing_threads_ = in_flight_batches_limit_;
}
} else {
recent_low_traffic_ratio_ += kLowTrafficMovingAverageFactor;
}
batch_count_ = 0;
no_batch_count_ = 0;
pending_sum_ = 0;
batch_latency_sum_ = 0;
}
mu_.unlock();
}
}
namespace internal {
template <typename TaskType>
SDBSQueue<TaskType>::SDBSQueue(
std::shared_ptr<SerialDeviceBatchScheduler<TaskType>> scheduler,
const QueueOptions& options)
: scheduler_(scheduler), options_(options) {}
template <typename TaskType>
SDBSQueue<TaskType>::~SDBSQueue() {
const int kSleepMicros = 1000;
for (;;) {
{
mutex_lock l(mu_);
if (num_enqueued_batches_ == 0) {
break;
}
}
scheduler_->env()->SleepForMicroseconds(kSleepMicros);
}
scheduler_->RemoveQueue(this);
}
template <typename TaskType>
Status SDBSQueue<TaskType>::Schedule(std::unique_ptr<TaskType>* task) {
SDBSBatch<TaskType>* new_batch = nullptr;
size_t size = (*task)->size();
if (size > options_.max_batch_size) {
return errors::InvalidArgument("Task size ", size,
" is larger than maximum batch size ",
options_.max_batch_size);
}
{
mutex_lock l(mu_);
if (current_batch_ &&
current_batch_->size() + size > options_.max_batch_size) {
if (num_enqueued_batches_ >= options_.max_enqueued_batches) {
return errors::Unavailable("The batch scheduling queue is full");
}
current_batch_->Close();
current_batch_ = nullptr;
}
if (!current_batch_) {
num_enqueued_batches_++;
current_batch_ = new_batch =
new SDBSBatch<TaskType>(this, scheduler_->env()->NowMicros());
}
current_batch_->AddTask(std::move(*task));
num_enqueued_tasks_++;
}
if (new_batch != nullptr) scheduler_->AddBatch(new_batch);
return absl::OkStatus();
}
template <typename TaskType>
void SDBSQueue<TaskType>::ReleaseBatch(const SDBSBatch<TaskType>* batch) {
mutex_lock l(mu_);
num_enqueued_batches_--;
num_enqueued_tasks_ -= batch->num_tasks();
if (batch == current_batch_) {
current_batch_->Close();
current_batch_ = nullptr;
}
}
template <typename TaskType>
size_t SDBSQueue<TaskType>::NumEnqueuedTasks() const {
mutex_lock l(mu_);
return num_enqueued_tasks_;
}
template <typename TaskType>
size_t SDBSQueue<TaskType>::SchedulingCapacity() const {
mutex_lock l(mu_);
const int current_batch_capacity =
current_batch_ ? options_.max_batch_size - current_batch_->size() : 0;
const int spare_batches =
options_.max_enqueued_batches - num_enqueued_batches_;
return spare_batches * options_.max_batch_size + current_batch_capacity;
}
}
}
}
#endif | #include "tensorflow/core/kernels/batching_util/serial_device_batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/fake_clock_env.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace anonymous {
class FakeTask : public BatchTask {
public:
explicit FakeTask(size_t size) : size_(size) {}
~FakeTask() override = default;
size_t size() const override { return size_; }
private:
const size_t size_;
FakeTask(const FakeTask&) = delete;
void operator=(const FakeTask&) = delete;
};
Status ScheduleTask(size_t task_size, BatchScheduler<FakeTask>* scheduler) {
std::unique_ptr<FakeTask> task(new FakeTask(task_size));
Status status = scheduler->Schedule(&task);
CHECK_EQ(status.ok(), task == nullptr);
return status;
}
std::unique_ptr<Thread> CreateFakeClockAdvancerThread(
test_util::FakeClockEnv* env, Notification* start, Notification* stop) {
return std::unique_ptr<Thread>(Env::Default()->StartThread(
{}, "FakeClockAdvancerThread", [env, start, stop] {
start->WaitForNotification();
while (!stop->HasBeenNotified()) {
env->AdvanceByMicroseconds(10);
Env::Default()->SleepForMicroseconds(10);
}
}));
}
TEST(SerialDeviceBatchSchedulerTest, BadOptions) {
using Scheduler = SerialDeviceBatchScheduler<FakeTask>;
std::shared_ptr<Scheduler> scheduler;
Scheduler::Options default_options;
default_options.get_pending_on_serial_device = []() { return 0; };
Scheduler::Options options = default_options;
options.num_batch_threads = 0;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = default_options;
options.initial_in_flight_batches_limit = 0;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = default_options;
options.num_batch_threads = 5;
options.initial_in_flight_batches_limit = 8;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = default_options;
options.batches_to_average_over = -5;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = default_options;
options.target_pending = 0;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
}
TEST(SerialDeviceBatchSchedulerTest, InFlightBatchesLimit) {
SerialDeviceBatchScheduler<FakeTask>::Options options;
options.num_batch_threads = 3;
options.initial_in_flight_batches_limit = 2;
options.batches_to_average_over = 1000;
options.get_pending_on_serial_device = []() { return 0; };
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
mu.lock();
int batch_num = ++processed_batches;
mu.unlock();
if (batch_num == 2) {
Env::Default()->SleepForMicroseconds(1000);
finish_processing.Notify();
}
if (batch_num == 3) {
ASSERT_TRUE(finish_processing.HasBeenNotified());
}
finish_processing.WaitForNotification();
};
std::shared_ptr<SerialDeviceBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
SerialDeviceBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue1;
std::unique_ptr<BatchScheduler<FakeTask>> queue2;
std::unique_ptr<BatchScheduler<FakeTask>> queue3;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue1));
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue2));
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue3));
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
TF_ASSERT_OK(ScheduleTask(100, queue2.get()));
TF_ASSERT_OK(ScheduleTask(100, queue3.get()));
}
TEST(SerialDeviceBatchSchedulerTest, PendingOnSerialDevice) {
mutex mu;
int pending;
SerialDeviceBatchScheduler<FakeTask>::Options options;
options.num_batch_threads = 3;
options.initial_in_flight_batches_limit = 1;
options.batches_to_average_over = 1;
options.target_pending = 3;
options.get_pending_on_serial_device = [&mu, &pending]() {
mutex_lock l(mu);
return pending;
};
std::shared_ptr<SerialDeviceBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
SerialDeviceBatchScheduler<FakeTask>::Create(options, &scheduler));
int processed_batches = 0;
Notification start_processing;
auto queue_callback = [&mu, &processed_batches, &start_processing, &pending,
&scheduler](std::unique_ptr<Batch<FakeTask>> batch) {
int batch_num;
{
mutex_lock l(mu);
batch_num = ++processed_batches;
}
switch (batch_num) {
case 1:
start_processing.WaitForNotification();
{
mutex_lock l(mu);
pending = 3;
}
break;
case 2:
CHECK_EQ(scheduler->in_flight_batches_limit(), 1);
{
mutex_lock l(mu);
pending = 1;
}
break;
case 3:
CHECK_EQ(scheduler->in_flight_batches_limit(), 3);
{
mutex_lock l(mu);
pending = 3;
}
break;
default:
break;
}
};
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
for (int i = 0; i < 3; i++) {
TF_ASSERT_OK(ScheduleTask(800, queue.get()));
}
start_processing.Notify();
}
TEST(SerialDeviceBatchSchedulerTest, FullBatchSchedulingBoostMicros) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
SerialDeviceBatchScheduler<FakeTask>::Options options;
options.env = &env;
options.initial_in_flight_batches_limit = 1;
options.batches_to_average_over = 1000;
options.full_batch_scheduling_boost_micros = 10;
options.get_pending_on_serial_device = []() { return 0; };
mutex mu;
int processed_batches = 0;
auto queue_callback =
[&mu, &processed_batches](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
mutex_lock l(mu);
processed_batches++;
switch (processed_batches) {
case 1:
EXPECT_EQ(1000, batch->size());
break;
case 2:
EXPECT_EQ(100, batch->size());
break;
case 3:
EXPECT_EQ(80, batch->size());
break;
default:
EXPECT_TRUE(false) << "Should only have 3 batches";
}
};
std::shared_ptr<SerialDeviceBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
SerialDeviceBatchScheduler<FakeTask>::Create(options, &scheduler));
Env::Default()->SleepForMicroseconds(1000);
SerialDeviceBatchScheduler<FakeTask>::QueueOptions queue_options;
std::unique_ptr<BatchScheduler<FakeTask>> queue1;
std::unique_ptr<BatchScheduler<FakeTask>> queue2;
std::unique_ptr<BatchScheduler<FakeTask>> queue3;
queue_options.max_batch_size = 1000;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue1));
queue_options.max_batch_size = 1000;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue2));
queue_options.max_batch_size = 100;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue3));
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
env.AdvanceByMicroseconds(3);
TF_ASSERT_OK(ScheduleTask(1000, queue2.get()));
env.AdvanceByMicroseconds(5);
TF_ASSERT_OK(ScheduleTask(80, queue3.get()));
env.AdvanceByMicroseconds(1000);
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST(SerialDeviceBatchSchedulerTest, DeleteQueue) {
SerialDeviceBatchScheduler<FakeTask>::Options options;
options.initial_in_flight_batches_limit = 1;
options.batches_to_average_over = 1000;
options.get_pending_on_serial_device = []() { return 0; };
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
finish_processing.WaitForNotification();
mu.lock();
processed_batches++;
mu.unlock();
};
std::shared_ptr<SerialDeviceBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
SerialDeviceBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
for (int i = 0; i < 2; i++) {
TF_ASSERT_OK(ScheduleTask(800, queue.get()));
}
std::unique_ptr<Thread> queue_deleter(Env::Default()->StartThread(
{}, "QueueDeleterThread",
[&queue, &mu, &processed_batches, scheduler]() mutable {
queue.reset();
{
mutex_lock l(mu);
EXPECT_GT(processed_batches, 0);
}
scheduler.reset();
mutex_lock l(mu);
EXPECT_EQ(processed_batches, 2);
}));
scheduler.reset();
Env::Default()->SleepForMicroseconds(1000);
finish_processing.Notify();
}
TEST(SerialDeviceBatchSchedulerTest, DeleteScheduler) {
SerialDeviceBatchScheduler<FakeTask>::Options options;
options.initial_in_flight_batches_limit = 1;
options.batches_to_average_over = 1000;
options.get_pending_on_serial_device = []() { return 0; };
mutex mu;
int processed_batches = 0;
Notification start_processing;
Notification finish_processing;
auto queue_callback =
[&mu, &processed_batches, &start_processing,
&finish_processing](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
start_processing.WaitForNotification();
mutex_lock l(mu);
processed_batches++;
if (processed_batches == 2) {
finish_processing.Notify();
}
};
std::shared_ptr<SerialDeviceBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
SerialDeviceBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
for (int i = 0; i < 2; i++) {
TF_ASSERT_OK(ScheduleTask(800, queue.get()));
}
scheduler.reset();
start_processing.Notify();
finish_processing.WaitForNotification();
}
TEST(SerialDeviceBatchSchedulerTest, QueueCapacityInfo) {
SerialDeviceBatchScheduler<FakeTask>::Options options;
options.initial_in_flight_batches_limit = 1;
options.batches_to_average_over = 1000;
options.full_batch_scheduling_boost_micros = 1000;
options.get_pending_on_serial_device = []() { return 0; };
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
mu.lock();
int batch_num = ++processed_batches;
mu.unlock();
if (batch_num == 1) {
finish_processing.WaitForNotification();
}
};
std::shared_ptr<SerialDeviceBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
SerialDeviceBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue1;
std::unique_ptr<BatchScheduler<FakeTask>> queue2;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue1));
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue2));
TF_ASSERT_OK(ScheduleTask(800, queue1.get()));
TF_ASSERT_OK(ScheduleTask(100, queue2.get()));
EXPECT_EQ(queue2->NumEnqueuedTasks(), 1);
EXPECT_EQ(queue2->SchedulingCapacity(), 9 * 1000 + 900);
TF_ASSERT_OK(ScheduleTask(100, queue2.get()));
TF_ASSERT_OK(ScheduleTask(200, queue2.get()));
EXPECT_EQ(queue2->NumEnqueuedTasks(), 3);
EXPECT_EQ(queue2->SchedulingCapacity(), 9 * 1000 + 600);
TF_ASSERT_OK(ScheduleTask(700, queue2.get()));
EXPECT_EQ(queue2->NumEnqueuedTasks(), 4);
EXPECT_EQ(queue2->SchedulingCapacity(), 8 * 1000 + 300);
finish_processing.Notify();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/serial_device_batch_scheduler.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/serial_device_batch_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
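A condensed sketch of the minimal wiring a SerialDeviceBatchScheduler needs, pulled together from the pieces the tests above use separately; WorkTask, the atomic pending counter, and the option values are illustrative assumptions rather than part of the original files.
#include <atomic>
#include <memory>
#include "tensorflow/core/kernels/batching_util/serial_device_batch_scheduler.h"
namespace tensorflow {
namespace serving {
namespace {
// Hypothetical task type; only size() is required by the scheduler.
struct WorkTask : public BatchTask {
  explicit WorkTask(size_t n) : n(n) {}
  size_t size() const override { return n; }
  size_t n;
};
Status MakeScheduler(
    std::atomic<int64_t>* pending_ops,
    std::unique_ptr<BatchScheduler<WorkTask>>* queue,
    std::shared_ptr<SerialDeviceBatchScheduler<WorkTask>>* scheduler) {
  SerialDeviceBatchScheduler<WorkTask>::Options options;
  options.initial_in_flight_batches_limit = 1;
  options.target_pending = 2;  // scheduler nudges its in-flight limit toward
                               // this many pending device operations
  options.get_pending_on_serial_device = [pending_ops]() {
    return pending_ops->load();
  };
  TF_RETURN_IF_ERROR(
      SerialDeviceBatchScheduler<WorkTask>::Create(options, scheduler));
  auto callback = [pending_ops](std::unique_ptr<Batch<WorkTask>> batch) {
    // Stand-in for enqueueing the batch on the serial device; a real callback
    // would decrement the counter once the device finishes the work.
    pending_ops->fetch_add(1);
  };
  return (*scheduler)->AddQueue({}, callback, queue);
}
}  // namespace
}  // namespace serving
}  // namespace tensorflow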
c0ebcd67-aa31-4bde-8d1d-ac3a89a6019f | cpp | tensorflow/tensorflow | adaptive_shared_batch_scheduler | tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h | tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_ADAPTIVE_SHARED_BATCH_SCHEDULER_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_ADAPTIVE_SHARED_BATCH_SCHEDULER_H_
#include <algorithm>
#include <atomic>
#include <functional>
#include <memory>
#include <random>
#include <unordered_map>
#include <vector>
#include "absl/types/optional.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
namespace tensorflow {
namespace serving {
namespace internal {
template <typename TaskType>
class ASBSBatch;
template <typename TaskType>
class ASBSQueue;
}
template <typename TaskType>
class AdaptiveSharedBatchScheduler
: public std::enable_shared_from_this<
AdaptiveSharedBatchScheduler<TaskType>> {
public:
~AdaptiveSharedBatchScheduler() {
if (owned_batch_thread_pool_) {
delete batch_thread_pool_;
}
}
struct Options {
string thread_pool_name = {"batch_threads"};
int64_t num_batch_threads = port::MaxParallelism();
thread::ThreadPool* thread_pool = nullptr;
int64_t min_in_flight_batches_limit = 1;
int64_t full_batch_scheduling_boost_micros = 0;
Env* env = Env::Default();
double initial_in_flight_batches_limit = 3;
int64_t batches_to_average_over = 1000;
bool fifo_scheduling = false;
};
static Status Create(
const Options& options,
std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>>* scheduler);
struct QueueOptions {
int max_batch_size = 1000;
absl::optional<int> max_input_task_size = absl::nullopt;
absl::optional<int> max_tasks_per_batch = absl::nullopt;
int max_enqueued_batches = 10;
int64_t batch_timeout_micros = 0;
std::function<Status(std::unique_ptr<TaskType>* input_task, int first_size,
int max_batch_size,
std::vector<std::unique_ptr<TaskType>>* output_tasks)>
split_input_task_func;
bool disable_padding = false;
};
using BatchProcessor = std::function<void(std::unique_ptr<Batch<TaskType>>)>;
Status AddQueue(const QueueOptions& options,
BatchProcessor process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue);
double in_flight_batches_limit() {
mutex_lock l(mu_);
return in_flight_batches_limit_;
}
private:
friend class internal::ASBSQueue<TaskType>;
explicit AdaptiveSharedBatchScheduler(const Options& options);
void CallbackWrapper(const internal::ASBSBatch<TaskType>* batch,
BatchProcessor callback, bool is_express);
void MaybeScheduleNextBatch() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void MaybeScheduleNextBatchFIFO() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void MaybeScheduleClosedBatches();
void MaybeScheduleClosedBatchesLocked() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void MaybeScheduleClosedBatchesLockedFIFO() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void MaybeAdjustInflightLimit() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void AddBatch(const internal::ASBSBatch<TaskType>* batch);
void RemoveQueue(const internal::ASBSQueue<TaskType>* queue);
Env* GetEnv() const { return options_.env; }
const Options options_;
std::vector<const internal::ASBSBatch<TaskType>*> batches_ TF_GUARDED_BY(mu_);
std::deque<const internal::ASBSBatch<TaskType>*> fifo_batches_
TF_GUARDED_BY(mu_);
std::unordered_map<const internal::ASBSQueue<TaskType>*, BatchProcessor>
queues_and_callbacks_ TF_GUARDED_BY(mu_);
mutex mu_;
thread::ThreadPool* batch_thread_pool_;
bool owned_batch_thread_pool_ = false;
double in_flight_batches_limit_ TF_GUARDED_BY(mu_);
int64_t in_flight_batches_ TF_GUARDED_BY(mu_) = 0;
int64_t in_flight_express_batches_ TF_GUARDED_BY(mu_) = 0;
std::default_random_engine rand_engine_;
std::uniform_real_distribution<double> rand_double_;
int64_t batch_count_ TF_GUARDED_BY(mu_) = 0;
struct DelayStats {
int64_t batch_latency_sum = 0;
double last_avg_latency_ms = 0;
bool last_latency_decreased = false;
int step_direction = 1;
};
DelayStats batch_delay_stats_ TF_GUARDED_BY(mu_);
constexpr static double kMaxStepSizeMultiplier = 0.125;
constexpr static double kMinStepSizeMultiplier = 0.0078125;
double step_size_multiplier_ TF_GUARDED_BY(mu_) = kMaxStepSizeMultiplier;
AdaptiveSharedBatchScheduler(const AdaptiveSharedBatchScheduler&) = delete;
void operator=(const AdaptiveSharedBatchScheduler&) = delete;
};
namespace internal {
template <typename TaskType>
class ASBSQueue : public BatchScheduler<TaskType> {
public:
using QueueOptions =
typename AdaptiveSharedBatchScheduler<TaskType>::QueueOptions;
ASBSQueue(std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>> scheduler,
const QueueOptions& options);
~ASBSQueue() override;
Status Schedule(std::unique_ptr<TaskType>* task) override;
size_t NumEnqueuedTasks() const override;
size_t SchedulingCapacity() const override;
void ReleaseBatch(const ASBSBatch<TaskType>* batch);
size_t max_task_size() const override { return options_.max_batch_size; }
private:
size_t SchedulingCapacityLocked() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
static uint64 NewTraceMeContextIdForBatch();
std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>> scheduler_;
const QueueOptions options_;
ASBSBatch<TaskType>* current_batch_ TF_GUARDED_BY(mu_) = nullptr;
int64_t num_enqueued_batches_ TF_GUARDED_BY(mu_) = 0;
int64_t num_enqueued_tasks_ TF_GUARDED_BY(mu_) = 0;
mutable mutex mu_;
ASBSQueue(const ASBSQueue&) = delete;
void operator=(const ASBSQueue&) = delete;
};
template <typename TaskType>
class ASBSBatch : public Batch<TaskType> {
public:
ASBSBatch(ASBSQueue<TaskType>* queue, int64_t creation_time_micros,
int64_t batch_timeout_micros, uint64 traceme_context_id)
: queue_(queue),
creation_time_micros_(creation_time_micros),
schedulable_time_micros_(creation_time_micros + batch_timeout_micros),
traceme_context_id_(traceme_context_id) {}
~ASBSBatch() override {}
ASBSQueue<TaskType>* queue() const { return queue_; }
int64_t creation_time_micros() const { return creation_time_micros_; }
int64_t schedulable_time_micros() const { return schedulable_time_micros_; }
uint64 traceme_context_id() const { return traceme_context_id_; }
private:
ASBSQueue<TaskType>* queue_;
const int64_t creation_time_micros_;
const int64_t schedulable_time_micros_;
const uint64 traceme_context_id_;
ASBSBatch(const ASBSBatch&) = delete;
void operator=(const ASBSBatch&) = delete;
};
}
template <typename TaskType>
constexpr double AdaptiveSharedBatchScheduler<TaskType>::kMaxStepSizeMultiplier;
template <typename TaskType>
constexpr double AdaptiveSharedBatchScheduler<TaskType>::kMinStepSizeMultiplier;
template <typename TaskType>
Status AdaptiveSharedBatchScheduler<TaskType>::Create(
const Options& options,
std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>>* scheduler) {
if (options.num_batch_threads < 1) {
return errors::InvalidArgument("num_batch_threads must be positive; was ",
options.num_batch_threads);
}
if (options.min_in_flight_batches_limit < 1) {
return errors::InvalidArgument(
"min_in_flight_batches_limit must be >= 1; was ",
options.min_in_flight_batches_limit);
}
if (options.min_in_flight_batches_limit > options.num_batch_threads) {
return errors::InvalidArgument(
"min_in_flight_batches_limit (", options.min_in_flight_batches_limit,
") must be <= num_batch_threads (", options.num_batch_threads, ")");
}
if (options.full_batch_scheduling_boost_micros < 0) {
return errors::InvalidArgument(
"full_batch_scheduling_boost_micros can't be negative; was ",
options.full_batch_scheduling_boost_micros);
}
if (options.initial_in_flight_batches_limit > options.num_batch_threads) {
return errors::InvalidArgument(
"initial_in_flight_batches_limit (",
options.initial_in_flight_batches_limit,
") should not be larger than num_batch_threads (",
options.num_batch_threads, ")");
}
if (options.initial_in_flight_batches_limit <
options.min_in_flight_batches_limit) {
return errors::InvalidArgument("initial_in_flight_batches_limit (",
options.initial_in_flight_batches_limit,
"must be >= min_in_flight_batches_limit (",
options.min_in_flight_batches_limit, ")");
}
if (options.batches_to_average_over < 1) {
return errors::InvalidArgument(
"batches_to_average_over should be "
"greater than or equal to 1; was ",
options.batches_to_average_over);
}
scheduler->reset(new AdaptiveSharedBatchScheduler<TaskType>(options));
return absl::OkStatus();
}
template <typename TaskType>
AdaptiveSharedBatchScheduler<TaskType>::AdaptiveSharedBatchScheduler(
const Options& options)
: options_(options),
in_flight_batches_limit_(options.initial_in_flight_batches_limit),
rand_double_(0.0, 1.0) {
std::random_device device;
rand_engine_.seed(device());
if (options.thread_pool == nullptr) {
owned_batch_thread_pool_ = true;
batch_thread_pool_ = new thread::ThreadPool(
GetEnv(), options.thread_pool_name, options.num_batch_threads);
} else {
owned_batch_thread_pool_ = false;
batch_thread_pool_ = options.thread_pool;
}
}
template <typename TaskType>
Status AdaptiveSharedBatchScheduler<TaskType>::AddQueue(
const QueueOptions& options, BatchProcessor process_batch_callback,
std::unique_ptr<BatchScheduler<TaskType>>* queue) {
if (options.max_batch_size <= 0) {
return errors::InvalidArgument("max_batch_size must be positive; was ",
options.max_batch_size);
}
if (options.max_enqueued_batches <= 0) {
return errors::InvalidArgument(
"max_enqueued_batches must be positive; was ",
options.max_enqueued_batches);
}
if (options.max_input_task_size.has_value()) {
if (options.max_input_task_size.value() < options.max_batch_size) {
return errors::InvalidArgument(
"max_input_task_size must be larger than or equal to max_batch_size;"
"got max_input_task_size as ",
options.max_input_task_size.value(), " and max_batch_size as ",
options.max_batch_size);
}
}
internal::ASBSQueue<TaskType>* asbs_queue_raw;
queue->reset(asbs_queue_raw = new internal::ASBSQueue<TaskType>(
this->shared_from_this(), options));
mutex_lock l(mu_);
queues_and_callbacks_[asbs_queue_raw] = process_batch_callback;
return absl::OkStatus();
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::AddBatch(
const internal::ASBSBatch<TaskType>* batch) {
mutex_lock l(mu_);
if (options_.fifo_scheduling) {
fifo_batches_.push_back(batch);
} else {
batches_.push_back(batch);
}
int64_t delay_micros =
batch->schedulable_time_micros() - GetEnv()->NowMicros();
if (delay_micros <= 0) {
MaybeScheduleNextBatch();
return;
}
GetEnv()->SchedClosureAfter(
delay_micros, [this, lifetime_preserver = this->shared_from_this()] {
mutex_lock l(mu_);
MaybeScheduleNextBatch();
});
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::RemoveQueue(
const internal::ASBSQueue<TaskType>* queue) {
mutex_lock l(mu_);
queues_and_callbacks_.erase(queue);
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::MaybeScheduleNextBatchFIFO() {
const internal::ASBSBatch<TaskType>* batch = *fifo_batches_.begin();
if (batch->schedulable_time_micros() > GetEnv()->NowMicros()) {
return;
}
fifo_batches_.pop_front();
batch->queue()->ReleaseBatch(batch);
batch_thread_pool_->Schedule(std::bind(
&AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper, this, batch,
      queues_and_callbacks_[batch->queue()], /*is_express=*/false));
in_flight_batches_++;
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<
TaskType>::MaybeScheduleClosedBatchesLockedFIFO() {
int available_threads =
static_cast<int>(options_.num_batch_threads - in_flight_batches_ -
in_flight_express_batches_);
for (auto it = fifo_batches_.begin();
it != fifo_batches_.end() && available_threads > 0;
it = fifo_batches_.begin()) {
if ((*it)->IsClosed()) {
const internal::ASBSBatch<TaskType>* batch = *it;
fifo_batches_.pop_front();
batch->queue()->ReleaseBatch(batch);
batch_thread_pool_->Schedule(
std::bind(&AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper,
this, batch, queues_and_callbacks_[batch->queue()], true));
in_flight_express_batches_++;
available_threads--;
} else {
break;
}
}
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::MaybeScheduleNextBatch() {
bool batch_empty =
options_.fifo_scheduling ? fifo_batches_.empty() : batches_.empty();
if (batch_empty || in_flight_batches_ >= in_flight_batches_limit_) return;
if (in_flight_batches_limit_ - in_flight_batches_ < 1 &&
rand_double_(rand_engine_) >
in_flight_batches_limit_ - in_flight_batches_) {
return;
}
if (options_.fifo_scheduling) {
MaybeScheduleNextBatchFIFO();
return;
}
auto best_it = batches_.end();
double best_score = (std::numeric_limits<double>::max)();
int64_t now_micros = GetEnv()->NowMicros();
for (auto it = batches_.begin(); it != batches_.end(); it++) {
if ((*it)->schedulable_time_micros() > now_micros) continue;
const double score =
(*it)->creation_time_micros() -
options_.full_batch_scheduling_boost_micros * (*it)->size() /
static_cast<double>((*it)->queue()->max_task_size());
if (best_it == batches_.end() || score < best_score) {
best_score = score;
best_it = it;
}
}
if (best_it == batches_.end()) return;
const internal::ASBSBatch<TaskType>* batch = *best_it;
batches_.erase(best_it);
batch->queue()->ReleaseBatch(batch);
batch_thread_pool_->Schedule(
std::bind(&AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper, this,
batch, queues_and_callbacks_[batch->queue()], false));
in_flight_batches_++;
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::MaybeScheduleClosedBatches() {
mutex_lock l(mu_);
MaybeScheduleClosedBatchesLocked();
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<
TaskType>::MaybeScheduleClosedBatchesLocked() {
if (options_.fifo_scheduling) {
MaybeScheduleClosedBatchesLockedFIFO();
return;
}
int available_threads =
static_cast<int>(options_.num_batch_threads - in_flight_batches_ -
in_flight_express_batches_);
for (auto it = batches_.begin();
it != batches_.end() && available_threads > 0;) {
if ((*it)->IsClosed()) {
const internal::ASBSBatch<TaskType>* batch = *it;
it = batches_.erase(it);
batch->queue()->ReleaseBatch(batch);
batch_thread_pool_->Schedule(
std::bind(&AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper,
this, batch, queues_and_callbacks_[batch->queue()], true));
in_flight_express_batches_++;
available_threads--;
} else {
++it;
}
}
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::CallbackWrapper(
const internal::ASBSBatch<TaskType>* batch,
AdaptiveSharedBatchScheduler<TaskType>::BatchProcessor callback,
bool is_express) {
tsl::profiler::TraceMeConsumer trace_me(
[&] {
return profiler::TraceMeEncode(
"ProcessBatch", {{"batch_size_before_padding", batch->size()},
{"_r", 2} });
},
tsl::profiler::ContextType::kAdaptiveSharedBatchScheduler,
batch->traceme_context_id());
const int64_t start_time = batch->creation_time_micros();
callback(std::unique_ptr<Batch<TaskType>>(
const_cast<internal::ASBSBatch<TaskType>*>(batch)));
int64_t end_time = GetEnv()->NowMicros();
mutex_lock l(mu_);
if (is_express) {
in_flight_express_batches_--;
MaybeScheduleClosedBatchesLocked();
return;
}
in_flight_batches_--;
batch_count_++;
batch_delay_stats_.batch_latency_sum += end_time - start_time;
MaybeAdjustInflightLimit();
MaybeScheduleNextBatch();
}
template <typename TaskType>
void AdaptiveSharedBatchScheduler<TaskType>::MaybeAdjustInflightLimit() {
if (batch_count_ == options_.batches_to_average_over) {
double current_avg_latency_ms =
(batch_delay_stats_.batch_latency_sum / 1000.) / batch_count_;
bool current_latency_decreased =
current_avg_latency_ms < batch_delay_stats_.last_avg_latency_ms;
if (current_latency_decreased) {
step_size_multiplier_ *=
(batch_delay_stats_.last_latency_decreased ? 2 : 0.5);
step_size_multiplier_ =
std::min(step_size_multiplier_, kMaxStepSizeMultiplier);
step_size_multiplier_ =
std::max(step_size_multiplier_, kMinStepSizeMultiplier);
} else {
batch_delay_stats_.step_direction = -batch_delay_stats_.step_direction;
}
in_flight_batches_limit_ += batch_delay_stats_.step_direction *
in_flight_batches_limit_ *
step_size_multiplier_;
in_flight_batches_limit_ =
std::min(in_flight_batches_limit_,
static_cast<double>(options_.num_batch_threads));
in_flight_batches_limit_ =
std::max(in_flight_batches_limit_,
static_cast<double>(options_.min_in_flight_batches_limit));
batch_delay_stats_.last_avg_latency_ms = current_avg_latency_ms;
batch_delay_stats_.last_latency_decreased = current_latency_decreased;
batch_count_ = 0;
batch_delay_stats_.batch_latency_sum = 0;
}
}
namespace internal {
template <typename TaskType>
ASBSQueue<TaskType>::ASBSQueue(
std::shared_ptr<AdaptiveSharedBatchScheduler<TaskType>> scheduler,
const QueueOptions& options)
: scheduler_(scheduler), options_(options) {}
template <typename TaskType>
ASBSQueue<TaskType>::~ASBSQueue() {
const int kSleepMicros = 1000;
for (;;) {
{
mutex_lock l(mu_);
if (num_enqueued_batches_ == 0) {
break;
}
}
scheduler_->GetEnv()->SleepForMicroseconds(kSleepMicros);
}
scheduler_->RemoveQueue(this);
}
template <typename TaskType>
Status ASBSQueue<TaskType>::Schedule(std::unique_ptr<TaskType>* task) {
size_t size = (*task)->size();
if (options_.split_input_task_func == nullptr &&
size > options_.max_batch_size) {
return errors::InvalidArgument("Task size ", size,
" is larger than maximum batch size ",
options_.max_batch_size);
}
if (options_.max_input_task_size.has_value() &&
(size > options_.max_input_task_size.value())) {
return errors::InvalidArgument("Task size ", size,
" is larger than max input task size ",
options_.max_input_task_size.value());
}
std::vector<std::unique_ptr<TaskType>> tasks_to_schedule;
std::vector<ASBSBatch<TaskType>*> new_batches;
bool closed_batch = false;
{
mutex_lock l(mu_);
if (size > SchedulingCapacityLocked()) {
return errors::Unavailable("The batch scheduling queue is full");
}
int remaining_batch_size =
current_batch_ == nullptr
? options_.max_batch_size
: options_.max_batch_size - current_batch_->size();
if (options_.split_input_task_func == nullptr ||
size <= remaining_batch_size) {
tasks_to_schedule.push_back(std::move(*task));
} else {
TF_RETURN_IF_ERROR(options_.split_input_task_func(
task, remaining_batch_size, options_.max_batch_size,
&tasks_to_schedule));
}
for (auto& task : tasks_to_schedule) {
if (current_batch_ &&
current_batch_->size() + task->size() > options_.max_batch_size) {
current_batch_->Close();
closed_batch = true;
current_batch_ = nullptr;
}
if (!current_batch_) {
num_enqueued_batches_++;
current_batch_ = new ASBSBatch<TaskType>(
this, scheduler_->GetEnv()->NowMicros(),
options_.batch_timeout_micros, NewTraceMeContextIdForBatch());
new_batches.push_back(current_batch_);
}
tsl::profiler::TraceMeProducer trace_me(
[task_size = task->size()] {
return profiler::TraceMeEncode(
"ASBSQueue::Schedule",
{{"batching_input_task_size", task_size}});
},
tsl::profiler::ContextType::kAdaptiveSharedBatchScheduler,
this->current_batch_->traceme_context_id());
current_batch_->AddTask(std::move(task));
num_enqueued_tasks_++;
bool reached_max_tasks =
(options_.max_tasks_per_batch.has_value() &&
current_batch_->num_tasks() >= options_.max_tasks_per_batch.value());
if (current_batch_->size() == options_.max_batch_size ||
reached_max_tasks) {
current_batch_->Close();
closed_batch = true;
current_batch_ = nullptr;
}
}
}
for (auto* batch : new_batches) {
scheduler_->AddBatch(batch);
}
if (closed_batch) {
scheduler_->MaybeScheduleClosedBatches();
}
return absl::OkStatus();
}
template <typename TaskType>
void ASBSQueue<TaskType>::ReleaseBatch(const ASBSBatch<TaskType>* batch) {
mutex_lock l(mu_);
num_enqueued_batches_--;
num_enqueued_tasks_ -= batch->num_tasks();
if (batch == current_batch_) {
current_batch_->Close();
current_batch_ = nullptr;
}
}
template <typename TaskType>
size_t ASBSQueue<TaskType>::NumEnqueuedTasks() const {
mutex_lock l(mu_);
return num_enqueued_tasks_;
}
template <typename TaskType>
size_t ASBSQueue<TaskType>::SchedulingCapacity() const {
mutex_lock l(mu_);
return SchedulingCapacityLocked();
}
template <typename TaskType>
size_t ASBSQueue<TaskType>::SchedulingCapacityLocked() const {
const int current_batch_capacity =
current_batch_ ? options_.max_batch_size - current_batch_->size() : 0;
const int spare_batches =
options_.max_enqueued_batches - num_enqueued_batches_;
return spare_batches * options_.max_batch_size + current_batch_capacity;
}
template <typename TaskType>
uint64 ASBSQueue<TaskType>::NewTraceMeContextIdForBatch() {
static std::atomic<uint64> traceme_context_id(0);
return traceme_context_id.fetch_add(1, std::memory_order_relaxed);
}
}  // namespace internal
}  // namespace serving
}  // namespace tensorflow
#endif | #include "tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/fake_clock_env.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace anonymous {
class FakeTask : public BatchTask {
public:
explicit FakeTask(size_t size) : size_(size) {}
~FakeTask() override = default;
size_t size() const override { return size_; }
void set_size(size_t size) { size_ = size; }
private:
size_t size_;
FakeTask(const FakeTask&) = delete;
void operator=(const FakeTask&) = delete;
};
Status ScheduleTask(size_t task_size, BatchScheduler<FakeTask>* scheduler) {
std::unique_ptr<FakeTask> task(new FakeTask(task_size));
Status status = scheduler->Schedule(&task);
CHECK_EQ(status.ok(), task == nullptr);
return status;
}
std::unique_ptr<Thread> CreateFakeClockAdvancerThread(
test_util::FakeClockEnv* env, Notification* start, Notification* stop) {
return std::unique_ptr<Thread>(Env::Default()->StartThread(
{}, "FakeClockAdvancerThread", [env, start, stop] {
start->WaitForNotification();
while (!stop->HasBeenNotified()) {
env->AdvanceByMicroseconds(10);
Env::Default()->SleepForMicroseconds(10);
}
}));
}
TEST(AdaptiveSharedBatchSchedulerTest, BadOptions) {
using Scheduler = AdaptiveSharedBatchScheduler<FakeTask>;
std::shared_ptr<Scheduler> scheduler;
Scheduler::Options options;
options.num_batch_threads = 0;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
options.initial_in_flight_batches_limit = 0.5;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
options.num_batch_threads = 5;
options.initial_in_flight_batches_limit = 8;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
options.batches_to_average_over = -5;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
options.min_in_flight_batches_limit = 0;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
options.min_in_flight_batches_limit = 5;
options.num_batch_threads = 3;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
options = Scheduler::Options();
options.initial_in_flight_batches_limit = 1;
options.min_in_flight_batches_limit = 2;
options.num_batch_threads = 3;
EXPECT_FALSE(Scheduler::Create(options, &scheduler).ok());
}
TEST(AdaptiveSharedBatchSchedulerTest, InFlightBatchesLimit) {
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.initial_in_flight_batches_limit = 2;
options.batches_to_average_over = 1000;
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
mu.lock();
int batch_num = ++processed_batches;
mu.unlock();
if (batch_num == 2) {
Env::Default()->SleepForMicroseconds(1000);
finish_processing.Notify();
}
if (batch_num == 3) {
ASSERT_TRUE(finish_processing.HasBeenNotified());
}
finish_processing.WaitForNotification();
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
while (queue->NumEnqueuedTasks() > 0) {
}
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
while (queue->NumEnqueuedTasks() > 0) {
}
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
}
TEST(AdaptiveSharedBatchSchedulerTest, InFlightBatchesLimitTuning) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.env = &env;
options.initial_in_flight_batches_limit = 2;
options.batches_to_average_over = 1;
auto queue_callback = [&env](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
switch (batch->size()) {
case 0:
env.AdvanceByMicroseconds(10);
break;
case 1:
env.AdvanceByMicroseconds(15);
break;
case 2:
env.AdvanceByMicroseconds(10);
break;
case 3:
env.AdvanceByMicroseconds(11);
break;
}
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
TF_ASSERT_OK(ScheduleTask(0, queue.get()));
double in_flight_batches_limit = 2;
while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
}
EXPECT_LT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
in_flight_batches_limit = scheduler->in_flight_batches_limit();
TF_ASSERT_OK(ScheduleTask(1, queue.get()));
while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
}
EXPECT_GT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
in_flight_batches_limit = scheduler->in_flight_batches_limit();
TF_ASSERT_OK(ScheduleTask(2, queue.get()));
while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
}
EXPECT_GT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
in_flight_batches_limit = scheduler->in_flight_batches_limit();
TF_ASSERT_OK(ScheduleTask(3, queue.get()));
while (scheduler->in_flight_batches_limit() == in_flight_batches_limit) {
}
EXPECT_LT(scheduler->in_flight_batches_limit(), in_flight_batches_limit);
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST(AdaptiveSharedBatchSchedulerTest, FullBatchSchedulingBoostMicros) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.env = &env;
options.initial_in_flight_batches_limit = 1;
options.num_batch_threads = 1;
options.batches_to_average_over = 1000;
options.full_batch_scheduling_boost_micros = 100;
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
finish_processing.WaitForNotification();
mutex_lock l(mu);
processed_batches++;
switch (processed_batches) {
case 1:
EXPECT_EQ(100, batch->size());
break;
case 2:
EXPECT_EQ(50, batch->size());
break;
case 3:
EXPECT_EQ(900, batch->size());
break;
case 4:
EXPECT_EQ(200, batch->size());
break;
default:
EXPECT_TRUE(false) << "Should only have 4 batches";
}
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
std::unique_ptr<BatchScheduler<FakeTask>> queue1;
std::unique_ptr<BatchScheduler<FakeTask>> queue2;
queue_options.max_batch_size = 1000;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue1));
queue_options.max_batch_size = 100;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue2));
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
while (queue1->NumEnqueuedTasks() > 0) {
}
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
env.AdvanceByMicroseconds(10);
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
env.AdvanceByMicroseconds(10);
TF_ASSERT_OK(ScheduleTask(50, queue2.get()));
env.AdvanceByMicroseconds(45);
TF_ASSERT_OK(ScheduleTask(900, queue1.get()));
finish_processing.Notify();
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST(AdaptiveSharedBatchSchedulerTest, FIFO) {
test_util::FakeClockEnv env(Env::Default());
Notification start_teardown, stop_teardown;
std::unique_ptr<Thread> teardown_thread =
CreateFakeClockAdvancerThread(&env, &start_teardown, &stop_teardown);
{
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.env = &env;
options.initial_in_flight_batches_limit = 1;
options.num_batch_threads = 1;
options.batches_to_average_over = 1000;
options.full_batch_scheduling_boost_micros = 0;
options.fifo_scheduling = true;
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
finish_processing.WaitForNotification();
mutex_lock l(mu);
processed_batches++;
switch (processed_batches) {
case 1:
EXPECT_EQ(100, batch->size());
break;
case 2:
EXPECT_EQ(200, batch->size());
break;
case 3:
EXPECT_EQ(50, batch->size());
break;
case 4:
EXPECT_EQ(900, batch->size());
break;
default:
EXPECT_TRUE(false) << "Should only have 4 batches";
}
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
std::unique_ptr<BatchScheduler<FakeTask>> queue1;
std::unique_ptr<BatchScheduler<FakeTask>> queue2;
queue_options.max_batch_size = 1000;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue1));
queue_options.max_batch_size = 100;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue2));
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
env.AdvanceByMicroseconds(30);
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
env.AdvanceByMicroseconds(10);
TF_ASSERT_OK(ScheduleTask(100, queue1.get()));
env.AdvanceByMicroseconds(10);
TF_ASSERT_OK(ScheduleTask(50, queue2.get()));
env.AdvanceByMicroseconds(45);
TF_ASSERT_OK(ScheduleTask(900, queue1.get()));
finish_processing.Notify();
start_teardown.Notify();
}
stop_teardown.Notify();
}
TEST(AdaptiveSharedBatchSchedulerTest, DeleteQueue) {
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.initial_in_flight_batches_limit = 1;
options.num_batch_threads = 1;
options.batches_to_average_over = 1000;
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
finish_processing.WaitForNotification();
mu.lock();
processed_batches++;
mu.unlock();
};
auto processed_checker = gtl::MakeCleanup([&mu, &processed_batches] {
mutex_lock l(mu);
EXPECT_EQ(processed_batches, 2);
});
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
while (queue->NumEnqueuedTasks() > 0) {
}
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
Env::Default()->SchedClosureAfter(
1000, [&finish_processing] { finish_processing.Notify(); });
}
TEST(AdaptiveSharedBatchSchedulerTest, QueueCapacityInfo) {
AdaptiveSharedBatchScheduler<FakeTask>::Options options;
options.initial_in_flight_batches_limit = 1;
options.batches_to_average_over = 1000;
mutex mu;
int processed_batches = 0;
Notification finish_processing;
auto queue_callback = [&mu, &processed_batches, &finish_processing](
std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
EXPECT_GT(batch->num_tasks(), 0);
mu.lock();
int batch_num = ++processed_batches;
mu.unlock();
if (batch_num == 1) {
finish_processing.WaitForNotification();
}
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
AdaptiveSharedBatchScheduler<FakeTask>::Create(options, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
while (queue->NumEnqueuedTasks() > 0) {
}
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
EXPECT_EQ(queue->NumEnqueuedTasks(), 1);
EXPECT_EQ(queue->SchedulingCapacity(), 9 * 1000 + 900);
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
TF_ASSERT_OK(ScheduleTask(200, queue.get()));
EXPECT_EQ(queue->NumEnqueuedTasks(), 3);
EXPECT_EQ(queue->SchedulingCapacity(), 9 * 1000 + 600);
TF_ASSERT_OK(ScheduleTask(700, queue.get()));
EXPECT_EQ(queue->NumEnqueuedTasks(), 1);
EXPECT_EQ(queue->SchedulingCapacity(), 9 * 1000 + 300);
finish_processing.Notify();
}
TEST(AdaptiveSharedBatchSchedulerTest, FullBatches) {
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(AdaptiveSharedBatchScheduler<FakeTask>::Create({}, &scheduler));
auto queue_callback = [](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
};
AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
queue_options.max_batch_size = 100;
queue_options.batch_timeout_micros = 1000000000000;
std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue));
TF_ASSERT_OK(ScheduleTask(100, queue.get()));
}
TEST(AdaptiveSharedBatchSchedulerTest, TruncateBatches) {
mutex mu;
int processed_batches = 0;
auto queue_callback =
[&mu, &processed_batches](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
mutex_lock l(mu);
++processed_batches;
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(AdaptiveSharedBatchScheduler<FakeTask>::Create({}, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
queue_options.max_batch_size = 100;
queue_options.batch_timeout_micros = 1000000;
queue_options.split_input_task_func =
[](std::unique_ptr<FakeTask>* input_task, int first_size, int max_size,
std::vector<std::unique_ptr<FakeTask>>* output_tasks) {
EXPECT_EQ(first_size, 70);
output_tasks->push_back(std::move(*input_task));
int remaining_size = output_tasks->back()->size() - first_size;
output_tasks->back()->set_size(first_size);
while (remaining_size > 0) {
int task_size = std::min(remaining_size, max_size);
output_tasks->emplace_back(new FakeTask(task_size));
remaining_size -= task_size;
}
return absl::OkStatus();
};
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue));
TF_ASSERT_OK(ScheduleTask(30, queue.get()));
TF_ASSERT_OK(ScheduleTask(350, queue.get()));
while (true) {
mutex_lock l(mu);
if (processed_batches == 4) break;
}
}
TEST(AdaptiveSharedBatchSchedulerTest, MaxTasksPerBatch) {
mutex mu;
int processed_batches = 0;
auto queue_callback =
[&mu, &processed_batches](std::unique_ptr<Batch<FakeTask>> batch) {
ASSERT_TRUE(batch->IsClosed());
mutex_lock l(mu);
++processed_batches;
};
std::shared_ptr<AdaptiveSharedBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(AdaptiveSharedBatchScheduler<FakeTask>::Create({}, &scheduler));
std::unique_ptr<BatchScheduler<FakeTask>> queue;
AdaptiveSharedBatchScheduler<FakeTask>::QueueOptions queue_options;
queue_options.max_batch_size = 100;
queue_options.batch_timeout_micros = 1000000;
queue_options.max_tasks_per_batch = 2;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue));
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
EXPECT_EQ(queue->NumEnqueuedTasks(), 1);
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
EXPECT_EQ(queue->NumEnqueuedTasks(), 0);
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
TF_ASSERT_OK(ScheduleTask(10, queue.get()));
while (true) {
mutex_lock l(mu);
if (processed_batches == 3) break;
}
}
}  // namespace anonymous
}  // namespace serving
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2896c2b7-5f4b-4da3-aa65-c05452830b3a | cpp | tensorflow/tensorflow | mkl_quantized_conv_ops | tensorflow/core/kernels/mkl/mkl_quantized_conv_ops.h | tensorflow/core/kernels/mkl/mkl_quantized_conv_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_
#define TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#ifdef INTEL_MKL
namespace tensorflow {
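// Real-valued width of one quantized step of type T over
// [range_min, range_max]. For signed types the most negative value is
// dropped so the representable range is symmetric (e.g. qint8 is treated as
// [-127, 127]).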
template <class T>
float MklFloatForOneQuantizedLevel(float range_min, float range_max) {
int64 highest = static_cast<int64_t>(Eigen::NumTraits<T>::highest());
int64 lowest = static_cast<int64_t>(Eigen::NumTraits<T>::lowest());
if (lowest < -highest) ++lowest;
const float float_for_one_quantized_level =
(range_max - range_min) / (highest - lowest);
return float_for_one_quantized_level;
}
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
float min_b, float max_b,
float* min_c, float* max_c) {
const float a_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
const float b_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T2>(min_b, max_b);
const int64 c_highest = static_cast<int64_t>(Eigen::NumTraits<T3>::highest());
const int64 c_lowest = static_cast<int64_t>(Eigen::NumTraits<T3>::lowest());
const float c_float_for_one_quant_level =
a_float_for_one_quant_level * b_float_for_one_quant_level;
*min_c = c_float_for_one_quant_level * c_lowest;
*max_c = c_float_for_one_quant_level * c_highest;
}
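// Worked example: with T1 = quint8 over [0, 255] and T2 = qint8 over
// [-127, 127], each input step is 1.0f, so for a qint32 output the product
// range becomes *min_c = -2147483648.0f and *max_c = 2147483647.0f, i.e. the
// full qint32 range. The overload below performs the same computation per
// output channel, reading per-channel filter ranges from min_b_vector and
// max_b_vector.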
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
const Tensor& min_b_vector,
const Tensor& max_b_vector,
Tensor** min_c_vector,
Tensor** max_c_vector) {
DCHECK(min_b_vector.NumElements() == (*min_c_vector)->NumElements());
DCHECK(max_b_vector.NumElements() == (*max_c_vector)->NumElements());
size_t n_channel = min_b_vector.NumElements();
const int64 c_highest = static_cast<int64_t>(Eigen::NumTraits<T3>::highest());
const int64 c_lowest = static_cast<int64_t>(Eigen::NumTraits<T3>::lowest());
const float* min_b = min_b_vector.flat<float>().data();
const float* max_b = max_b_vector.flat<float>().data();
float* min_c = (*min_c_vector)->flat<float>().data();
float* max_c = (*max_c_vector)->flat<float>().data();
#ifdef ENABLE_ONEDNN_OPENMP
#pragma omp parallel for
#endif
for (int64_t n = 0; n < n_channel; ++n) {
float a_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
float b_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T2>(min_b[n], max_b[n]);
float c_float_for_one_quant_level =
a_float_for_one_quant_level * b_float_for_one_quant_level;
min_c[n] = c_float_for_one_quant_level * c_lowest;
max_c[n] = c_float_for_one_quant_level * c_highest;
}
}
}  // namespace tensorflow
#endif  // INTEL_MKL
#endif | #if defined(INTEL_MKL) && defined(ENABLE_MKL)
#define EIGEN_USE_THREADS
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/mkl/mkl_kernel_util.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
class QuantizedConv2DTest : public OpsTestBase {
protected:
template <typename Tinput>
void ConfigureQuantizedConv2D(const bool old_api, const int& stride,
const string& padding,
const std::vector<int> padding_values = {}) {
if (old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "_MklQuantizedConv2D")
.Input(FakeInput(DataTypeToEnum<Tinput>::v()))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Tinput", DataTypeToEnum<Tinput>::v())
.Attr("Tfilter", DataTypeToEnum<qint8>::v())
.Attr("out_type", DataTypeToEnum<qint32>::v())
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", padding)
.Attr("padding_list", padding_values)
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_EXPECT_OK(
NodeDefBuilder("quantized_conv_op", "_FusedQuantizedConv2D")
.Attr("Thost_inputs", {DataTypeToEnum<Tinput>::v(), DT_QINT8,
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("Tinput", DataTypeToEnum<Tinput>::v())
.Attr("Tfilter", DT_QINT8)
.Attr("Tsummand", DT_QINT32)
.Attr("out_type", DT_QINT32)
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", padding)
.Attr("explicit_paddings", padding_values)
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
}
void RunQuantizedDepthwiseConv2DOp(const bool& bias_enabled) {
const int depth = 2;
const int image_width = 2;
const int image_height = 3;
const int image_batch_count = 1;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<qint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{1, 2, 7, 8, 13, 14, 3, 4, 9, 10, 15, 16, 5, 6, 11, 12, 17, 18});
if (bias_enabled) {
AddInputFromArray<float>(TensorShape({depth}), {1.0f, 1.0f});
}
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(DT_QINT32, TensorShape({image_batch_count, image_height,
image_width, depth}));
if (bias_enabled) {
test::FillValues<qint32>(&expected, {229, 301, 133, 181, 483, 597, 267,
345, 373, 453, 181, 237});
} else {
test::FillValues<qint32>(&expected, {228, 300, 132, 180, 482, 596, 266,
344, 372, 452, 180, 236});
}
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
void TestSmall(const bool old_api) {
const int stride = 1;
const string padding = "SAME";
ConfigureQuantizedConv2D<quint8>(old_api, stride, padding);
const int depth = 1;
const int image_width = 4;
const int image_height = 3;
const int image_batch_count = 1;
const float image_min = 0.0f;
const float image_max = 255.0f;
Tensor image_float(DT_FLOAT,
{image_batch_count, image_height, image_width, depth});
test::FillValues<float>(&image_float,
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
Tensor image_quantized =
FloatTensorToQuantized<quint8>(image_float, image_min, image_max);
const int filter_size = 3;
const int filter_count = 1;
const float filter_min = -127.0f;
const float filter_max = 127.0f;
Tensor filter_float(DT_FLOAT,
{filter_size, filter_size, depth, filter_count});
test::FillValues<float>(&filter_float, {1, 4, 7, 2, 5, 8, 3, 6, 9});
Tensor filter_quantized =
FloatTensorToQuantized<qint8>(filter_float, filter_min, filter_max);
AddInputFromArray<quint8>(image_quantized.shape(),
image_quantized.flat<quint8>());
AddInputFromArray<qint8>(filter_quantized.shape(),
filter_quantized.flat<qint8>());
AddInputFromArray<float>(TensorShape({}), {image_min});
AddInputFromArray<float>(TensorShape({}), {image_max});
AddInputFromArray<float>(TensorShape({}), {filter_min});
AddInputFromArray<float>(TensorShape({}), {filter_max});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
const int expected_height = image_height;
Tensor expected_float(DT_FLOAT,
TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<float>(&expected_float, {105, 150, 183, 95, 235, 312, 357,
178, 187, 234, 261, 121});
const Tensor& output = *GetOutput(0);
const float output_min = GetOutput(1)->scalar<float>()();
const float output_max = GetOutput(2)->scalar<float>()();
Tensor output_float =
QuantizedTensorToFloat<qint32>(output, output_min, output_max);
test::ExpectTensorNear<float>(expected_float, output_float, 1.0);
}
void TestSmallS8(const bool old_api) {
const int stride = 1;
const int depth = 1;
const int image_width = 3;
const int image_height = 3;
const int image_batch_count = 1;
const float image_min = -127.0f;
const float image_max = 127.0f;
const string padding = "VALID";
ConfigureQuantizedConv2D<qint8>(old_api, stride, padding);
Tensor image_float(DT_FLOAT,
{image_batch_count, image_height, image_width, depth});
test::FillValues<float>(&image_float, {2, 3, 4, 6, -4, -2, 3, 0, 4});
Tensor image_quantized =
FloatTensorToQuantized<qint8>(image_float, image_min, image_max);
const int filter_size = 3;
const int filter_count = 1;
const float filter_min = -127.0f;
const float filter_max = 127.0f;
Tensor filter_float(DT_FLOAT,
{filter_size, filter_size, depth, filter_count});
test::FillValues<float>(&filter_float, {1, 4, 2, 0, 5, -1, 3, -1, -3});
Tensor filter_quantized =
FloatTensorToQuantized<qint8>(filter_float, filter_min, filter_max);
AddInputFromArray<qint8>(image_quantized.shape(),
image_quantized.flat<qint8>());
AddInputFromArray<qint8>(filter_quantized.shape(),
filter_quantized.flat<qint8>());
AddInputFromArray<float>(TensorShape({}), {image_min});
AddInputFromArray<float>(TensorShape({}), {image_max});
AddInputFromArray<float>(TensorShape({}), {filter_min});
AddInputFromArray<float>(TensorShape({}), {filter_max});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = 1;
const int expected_height = 1;
Tensor expected_float(DT_FLOAT,
TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<float>(&expected_float, {1});
const Tensor& output = *GetOutput(0);
const float output_min = GetOutput(1)->scalar<float>()();
const float output_max = GetOutput(2)->scalar<float>()();
Tensor output_float =
QuantizedTensorToFloat<qint32>(output, output_min, output_max);
test::ExpectTensorNear<float>(expected_float, output_float, 1.0);
}
void TestSmall32Bit(const bool old_api) {
const int stride = 1;
const string padding = "SAME";
ConfigureQuantizedConv2D<quint8>(old_api, stride, padding);
const int depth = 1;
const int image_width = 4;
const int image_height = 3;
const int image_batch_count = 1;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<qint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{10, 40, 70, 20, 50, 80, 30, 60, 90});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
const int expected_height = image_height;
Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<qint32>(
&expected, {10500, 15000, 18300, 9500, 23500, 31200, 35700, 17800,
18700, 23400, 26100, 12100});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
void TestSmall32BitWithPadding(const bool old_api) {
const int stride = 1;
const string padding = "SAME";
ConfigureQuantizedConv2D<quint8>(old_api, stride, padding);
const int depth = 1;
const int image_width = 4;
const int image_height = 3;
const int image_batch_count = 1;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<qint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{10, 40, 70, 20, 50, 80, 30, 60, 90});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
const int expected_height = image_height;
Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<qint32>(
&expected, {10500, 15000, 18300, 9500, 23500, 31200, 35700, 17800,
18700, 23400, 26100, 12100});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
void TestOddPadding(const bool old_api) {
const int stride = 2;
string padding = "SAME";
ConfigureQuantizedConv2D<quint8>(old_api, stride, padding);
const int depth = 1;
const int image_width = 4;
const int image_height = 4;
const int image_batch_count = 1;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<qint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width / stride;
const int expected_height = image_height / stride;
Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<qint32>(&expected, {348, 252, 274, 175});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
void TestOddPaddingBatch(const bool old_api) {
const int stride = 2;
const string padding = "SAME";
ConfigureQuantizedConv2D<quint8>(old_api, stride, padding);
const int depth = 1;
const int image_width = 4;
const int image_height = 4;
const int image_batch_count = 3;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<qint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width / stride;
const int expected_height = image_height / stride;
Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<qint32>(&expected, {348, 252, 274, 175, 348, 252, 274, 175,
348, 252, 274, 175});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
void TestDepthwiseConv2D(const bool old_api) {
const int stride = 1;
if (old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_depthwise_conv_op",
"_MklQuantizedDepthwiseConv2D")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Tinput", DataTypeToEnum<quint8>::v())
.Attr("Tfilter", DataTypeToEnum<qint8>::v())
.Attr("out_type", DataTypeToEnum<qint32>::v())
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_EXPECT_OK(NodeDefBuilder("quantized_depthwise_conv_op",
"_FusedQuantizedDepthwiseConv2D")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("Tinput", DT_QUINT8)
.Attr("Tfilter", DT_QINT8)
.Attr("Tsummand", DT_QINT32)
.Attr("out_type", DT_QINT32)
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
RunQuantizedDepthwiseConv2DOp(false);
}
void TestDepthwiseConv2DWithBias(const bool old_api) {
const int stride = 1;
if (old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_depthwise_conv_op",
"_MklQuantizedDepthwiseConv2DWithBias")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Tinput", DataTypeToEnum<quint8>::v())
.Attr("Tfilter", DataTypeToEnum<qint8>::v())
.Attr("out_type", DataTypeToEnum<qint32>::v())
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_EXPECT_OK(
NodeDefBuilder("quantized_depthwise_conv_op",
"_FusedQuantizedDepthwiseConv2D")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_FLOAT, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("Tinput", DT_QUINT8)
.Attr("Tfilter", DT_QINT8)
.Attr("Tbias", DT_FLOAT)
.Attr("Tsummand", DT_QINT32)
.Attr("out_type", DT_QINT32)
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Attr("fused_ops", {"BiasAdd"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
RunQuantizedDepthwiseConv2DOp(true);
}
void TestDepthwiseConv2DWithBiasAndRelu(const bool old_api) {
const int stride = 1;
if (old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_depthwise_conv_op",
"_MklQuantizedDepthwiseConv2DWithBiasAndRelu")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Tinput", DataTypeToEnum<quint8>::v())
.Attr("Tfilter", DataTypeToEnum<qint8>::v())
.Attr("out_type", DataTypeToEnum<qint32>::v())
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_EXPECT_OK(
NodeDefBuilder("quantized_depthwise_conv_op",
"_FusedQuantizedDepthwiseConv2D")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_FLOAT, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("Tinput", DT_QUINT8)
.Attr("Tfilter", DT_QINT8)
.Attr("Tbias", DT_FLOAT)
.Attr("Tsummand", DT_QINT32)
.Attr("out_type", DT_QINT32)
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Attr("fused_ops", {"BiasAdd", "Relu"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
RunQuantizedDepthwiseConv2DOp(true);
}
};
TEST_F(QuantizedConv2DTest, SmallOldAPI) { TestSmall(true); }
TEST_F(QuantizedConv2DTest, SmallNewAPI) { TestSmall(false); }
TEST_F(QuantizedConv2DTest, SmallS8OldAPI) { TestSmallS8(true); }
TEST_F(QuantizedConv2DTest, SmallS8NewAPI) { TestSmallS8(false); }
TEST_F(QuantizedConv2DTest, Small32BitOldAPI) { TestSmall32Bit(true); }
TEST_F(QuantizedConv2DTest, Small32BitNewAPI) { TestSmall32Bit(false); }
TEST_F(QuantizedConv2DTest, Small32BitWithPaddingOldAPI) {
TestSmall32BitWithPadding(true);
}
TEST_F(QuantizedConv2DTest, Small32BitWithPaddingNewAPI) {
TestSmall32BitWithPadding(false);
}
TEST_F(QuantizedConv2DTest, OddPaddingOldAPI) { TestOddPadding(true); }
TEST_F(QuantizedConv2DTest, OddPaddingNewAPI) { TestOddPadding(false); }
TEST_F(QuantizedConv2DTest, OddPaddingBatchOldAPI) {
TestOddPaddingBatch(true);
}
TEST_F(QuantizedConv2DTest, OddPaddingBatchNewAPI) {
TestOddPaddingBatch(false);
}
TEST_F(QuantizedConv2DTest, DepthwiseConv2DOldAPI) {
TestDepthwiseConv2D(true);
}
TEST_F(QuantizedConv2DTest, DepthwiseConv2DNewAPI) {
TestDepthwiseConv2D(false);
}
TEST_F(QuantizedConv2DTest, DepthwiseConv2DWithBiasOldAPI) {
TestDepthwiseConv2DWithBias(true);
}
TEST_F(QuantizedConv2DTest, DepthwiseConv2DWithBiasNewAPI) {
TestDepthwiseConv2DWithBias(false);
}
TEST_F(QuantizedConv2DTest, DepthwiseConv2DWithBiasAndReluOldAPI) {
TestDepthwiseConv2DWithBiasAndRelu(true);
}
TEST_F(QuantizedConv2DTest, DepthwiseConv2DWithBiasAndReluNewAPI) {
TestDepthwiseConv2DWithBiasAndRelu(false);
}
class QuantizedConvTest : public OpsTestBase {
protected:
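  // Quantizes the float image and filter (and, depending on `fused_ops`, the
  // bias and summand) in SCALED mode, runs the fused quantized convolution
  // kernel built by the calling test, dequantizes the result using the
  // returned min/max outputs, and compares it against `expected_out_float`
  // within `tol`.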
template <typename Tinput, typename Tfilter, typename Toutput,
typename Tbias = float, typename Tsummand = float>
void RunQuantizedKernel(Tensor& image_float, Tensor& filter_float,
Tensor& bias_float, Tensor& summand_float,
Tensor& expected_out_float,
const std::vector<string>& fused_ops,
const float tol = 1.0) {
bool fuse_bias = std::find(fused_ops.begin(), fused_ops.end(), "BiasAdd") !=
fused_ops.end();
bool fuse_sum =
std::find(fused_ops.begin(), fused_ops.end(), "Sum") != fused_ops.end();
bool fuse_requantize = std::find(fused_ops.begin(), fused_ops.end(),
"Requantize") != fused_ops.end();
float image_min, image_max;
MklTestingUtil::ComputeMinMax<float>(image_float, &image_min, &image_max);
const float image_max_abs =
std::max(std::abs(image_min), std::abs(image_max));
Tensor image_quantized;
MklTestingUtil::RunMklQuantizeOp(image_float, -image_max_abs, image_max_abs,
DataTypeToEnum<Tinput>::v(), "SCALED",
&image_quantized);
float filter_min, filter_max;
MklTestingUtil::ComputeMinMax<float>(filter_float, &filter_min,
&filter_max);
const float filter_max_abs =
std::max(std::abs(filter_min), std::abs(filter_max));
Tensor filter_quantized;
MklTestingUtil::RunMklQuantizeOp(
filter_float, -filter_max_abs, filter_max_abs,
DataTypeToEnum<Tfilter>::v(), "SCALED", &filter_quantized);
AddInputFromArray<Tinput>(image_quantized.shape(),
image_quantized.flat<Tinput>());
AddInputFromArray<Tfilter>(filter_quantized.shape(),
filter_quantized.flat<Tfilter>());
if (fuse_bias) {
if (std::is_same<Tbias, float>::value) {
AddInputFromArray<Tbias>(bias_float.shape(), bias_float.flat<Tbias>());
} else {
float bias_min, bias_max;
MklTestingUtil::ComputeMinMax<float>(bias_float, &bias_min, &bias_max);
const float bias_max_abs =
std::max(std::abs(bias_min), std::abs(bias_max));
Tensor bias_quantized;
MklTestingUtil::RunMklQuantizeOp(
bias_float, -bias_max_abs, bias_max_abs, DataTypeToEnum<Tbias>::v(),
"SCALED", &bias_quantized);
AddInputFromArray<Tbias>(bias_quantized.shape(),
bias_quantized.flat<Tbias>());
}
}
bool is_quantized_summand = false;
float summand_max_abs = 0;
if (fuse_sum) {
if (std::is_same<Tsummand, float>::value) {
AddInputFromArray<Tsummand>(summand_float.shape(),
summand_float.flat<Tsummand>());
} else {
is_quantized_summand = true;
float summand_min, summand_max;
MklTestingUtil::ComputeMinMax<float>(summand_float, &summand_min,
&summand_max);
summand_max_abs =
std::max(std::abs(summand_min), std::abs(summand_max));
Tensor summand_quantized;
MklTestingUtil::RunMklQuantizeOp(
summand_float, -summand_max_abs, summand_max_abs,
DataTypeToEnum<Tsummand>::v(), "SCALED", &summand_quantized);
AddInputFromArray<Tsummand>(summand_quantized.shape(),
summand_quantized.flat<Tsummand>());
}
}
AddInputFromArray<float>(TensorShape({}), {-image_max_abs});
AddInputFromArray<float>(TensorShape({}), {image_max_abs});
AddInputFromArray<float>(TensorShape({}), {-filter_max_abs});
AddInputFromArray<float>(TensorShape({}), {filter_max_abs});
if (is_quantized_summand) {
AddInputFromArray<float>(TensorShape({}), {-summand_max_abs});
AddInputFromArray<float>(TensorShape({}), {summand_max_abs});
}
if (fuse_requantize) {
float expected_output_min, expected_output_max;
MklTestingUtil::ComputeMinMax<float>(
expected_out_float, &expected_output_min, &expected_output_max);
const float output_max_abs = std::max(std::abs(expected_output_min),
std::abs(expected_output_max));
AddInputFromArray<float>(TensorShape({}), {-output_max_abs});
AddInputFromArray<float>(TensorShape({}), {output_max_abs});
}
TF_ASSERT_OK(RunOpKernel());
const Tensor& output = *GetOutput(0);
const Tensor& output_min = *GetOutput(1);
const Tensor& output_max = *GetOutput(2);
const float output_max_value = output_max.scalar<float>()();
Tensor output_float;
MklTestingUtil::RunDequantizeOp(output, output_min, output_max, "SCALED",
&output_float);
if (std::is_same<Tsummand, qint8>::value &&
std::is_same<Toutput, quint8>::value) {
for (int i = 0; i < expected_out_float.NumElements(); i++) {
float* expected_data =
const_cast<float*>(expected_out_float.flat<float>().data());
expected_data[i] =
std::min(expected_data[i], output_max_value * 127.0f / 255.0f);
}
}
test::ExpectTensorNear<float>(expected_out_float, output_float, tol);
}
void RunFloatConv(const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, const Tensor& summand_data,
Tensor* output, const bool is_depthwise,
const std::vector<string>& fused_ops, const string padding,
const int stride) {
auto root = tensorflow::Scope::NewRootScope();
auto input_data_op =
ops::Const(root.WithOpName("input"), Input::Initializer(input_data));
Output out_op;
if (is_depthwise) {
out_op = ops::DepthwiseConv2dNative(
root.WithOpName("conv"), input_data_op,
ops::Const(root.WithOpName("filter"),
Input::Initializer(filter_data)),
{1, stride, stride, 1}, padding);
} else {
out_op = ops::Conv2D(root.WithOpName("conv"), input_data_op,
ops::Const(root.WithOpName("filter"),
Input::Initializer(filter_data)),
{1, stride, stride, 1}, padding);
}
string last_op = "";
for (int i = 0; i < fused_ops.size(); ++i) {
if (fused_ops[i] == "BiasAdd") {
last_op = "with_bias";
out_op = ops::BiasAdd(
root.WithOpName(last_op), out_op,
ops::Const(root.WithOpName("bias"), Input::Initializer(bias_data)));
}
if (fused_ops[i] == "Sum") {
last_op = "with_sum";
out_op = ops::AddV2(root.WithOpName(last_op), out_op,
ops::Const(root.WithOpName("summand"),
Input::Initializer(summand_data)));
}
if (fused_ops[i] == "Relu") {
last_op = "with_relu";
out_op = ops::Relu(root.WithOpName(last_op), out_op);
}
}
tensorflow::GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
MklTestingUtil::RunGraph(graph_def, last_op, output);
}
template <typename Tinput, typename Toutput>
void TestBiasAddFusion(bool fuse_requantize, const bool is_depthwise,
string activation = "", const float tol = 1.0) {
const int stride = 1;
const string padding = "VALID";
std::vector<string> fused_ops = {"BiasAdd"};
std::map<string, DataType> data_types = {
{"Tinput", DataTypeToEnum<Tinput>::v()},
{"Tfilter", DT_QINT8},
{"Tbias", DT_FLOAT},
{"Tsummand", DataTypeToEnum<Toutput>::v()},
{"out_type", DataTypeToEnum<Toutput>::v()}};
std::vector<DataType> input_types = {data_types["Tinput"],
data_types["Tfilter"],
data_types["Tbias"],
DT_FLOAT,
DT_FLOAT,
DT_FLOAT,
DT_FLOAT};
if (!activation.empty()) {
fused_ops.push_back(activation);
}
if (fuse_requantize) {
fused_ops.push_back("Requantize");
input_types.push_back(DT_FLOAT);
input_types.push_back(DT_FLOAT);
}
TF_EXPECT_OK(
NodeDefBuilder("quantized_conv_op",
is_depthwise ? "_FusedQuantizedDepthwiseConv2D"
: "_FusedQuantizedConv2D")
.Attr("Thost_inputs", input_types)
.Attr("Thost_outputs", {data_types["out_type"], DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("Tinput", data_types["Tinput"])
.Attr("Tfilter", data_types["Tfilter"])
.Attr("Tbias", data_types["Tbias"])
.Attr("Tsummand", data_types["Tsummand"])
.Attr("out_type", data_types["out_type"])
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", padding)
.Attr("fused_ops", fused_ops)
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const int image_batch = 1;
const int image_height = 6;
const int image_width = 6;
const int channels = 2;
const int filter_height = 2;
const int filter_width = 2;
const int filter_out_channels = 2;
Tensor image_float(DT_FLOAT,
{image_batch, image_height, image_width, channels});
test::FillValues<float>(
&image_float, {4, 3, 1, 0, 4, 6, 3, 1, 2, 1, 0, 2, 6, 2, 1, 3, 1, 3,
6, 1, 2, 5, 3, 2, 3, 4, 1, 4, 0, 3, 3, 1, 2, 0, 1, 1,
3, 3, 1, 0, 2, 1, 4, 3, 3, 2, 1, 4, 1, 0, 2, 2, 5, 0,
3, 3, 3, 1, 0, 2, 2, 1, 3, 2, 6, 3, 4, 6, 0, 1, 3, 5});
Tensor filter_float(
DT_FLOAT, {filter_height, filter_width, channels, filter_out_channels});
test::FillValues<float>(
&filter_float, {-2, -3, 0, 3, 1, -1, 4, 2, -3, -2, -4, 0, 4, 3, 1, 2});
Tensor bias_float(DT_FLOAT, {is_depthwise ? channels * filter_out_channels
: filter_out_channels});
if (is_depthwise) {
test::FillValues<float>(&bias_float, {1, 2, 1, 2});
} else {
test::FillValues<float>(&bias_float, {1, 2});
}
Tensor expected_float, dummy_summand;
RunFloatConv(image_float, filter_float, bias_float, dummy_summand,
&expected_float, is_depthwise, fused_ops, padding, stride);
RunQuantizedKernel<Tinput, qint8, Toutput, float>(
image_float, filter_float, bias_float, dummy_summand, expected_float,
fused_ops, tol);
}
template <typename Tsummand, typename Toutput>
void TestBiasAddSumActivationFusion(string activation = "") {
const int stride = 1;
const string padding = "VALID";
std::vector<string> fused_ops = {"BiasAdd", "Sum"};
std::map<string, DataType> data_types = {
{"Tinput", DT_QINT8},
{"Tfilter", DT_QINT8},
{"Tbias", DT_FLOAT},
{"Tsummand", DataTypeToEnum<Tsummand>::v()},
{"out_type", DataTypeToEnum<Toutput>::v()}};
std::vector<DataType> input_types = {data_types["Tinput"],
data_types["Tfilter"],
data_types["Tbias"],
data_types["Tsummand"],
DT_FLOAT,
DT_FLOAT,
DT_FLOAT,
DT_FLOAT};
if (std::is_same<Tsummand, quint8>::value ||
std::is_same<Tsummand, qint8>::value) {
input_types.push_back(DT_FLOAT);
input_types.push_back(DT_FLOAT);
}
if (!activation.empty()) {
fused_ops.push_back(activation);
}
if (std::is_same<Toutput, qint8>::value ||
std::is_same<Toutput, quint8>::value) {
fused_ops.push_back("Requantize");
input_types.push_back(DT_FLOAT);
input_types.push_back(DT_FLOAT);
}
TF_EXPECT_OK(
NodeDefBuilder("quantized_conv_op", "_FusedQuantizedConv2D")
.Attr("Thost_inputs", input_types)
.Attr("Thost_outputs", {data_types["out_type"], DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("Tinput", data_types["Tinput"])
.Attr("Tfilter", data_types["Tfilter"])
.Attr("Tbias", data_types["Tbias"])
.Attr("Tsummand", data_types["Tsummand"])
.Attr("out_type", data_types["out_type"])
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", padding)
.Attr("fused_ops", fused_ops)
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const int image_batch = 1;
const int image_height = 4;
const int image_width = 4;
const int channels = 2;
const int filter_height = 2;
const int filter_width = 2;
const int filter_out_channels = 2;
Tensor image_float(DT_FLOAT,
{image_batch, image_height, image_width, channels});
test::FillValues<float>(&image_float,
{2, 4, 5, 6, 1, 2, 3, 0, 1, 1, 6, 2, 6, 2, 4, 1,
3, 4, 3, 1, 1, 4, 0, 7, 3, 1, 5, 0, 2, 1, 3, 3});
Tensor filter_float(
DT_FLOAT, {filter_height, filter_width, channels, filter_out_channels});
test::FillValues<float>(
&filter_float, {1, -3, 0, 2, 3, -4, 0, 5, 2, 1, -1, -2, -5, 3, 4, 0});
Tensor bias_float(DT_FLOAT, {filter_out_channels});
test::FillValues<float>(&bias_float, {2, 4});
Tensor summand_float(DT_FLOAT, {1, 3, 3, 2});
test::FillValues<float>(
&summand_float, {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Tensor expected_float;
    RunFloatConv(image_float, filter_float, bias_float, summand_float,
                 &expected_float, /*is_depthwise=*/false, fused_ops, padding,
                 stride);
RunQuantizedKernel<qint8, qint8, Toutput, float, Tsummand>(
image_float, filter_float, bias_float, summand_float, expected_float,
fused_ops);
}
};
TEST_F(QuantizedConvTest, BiasAddFusion) {
TestBiasAddFusion<qint8, qint32>(false, false);
}
TEST_F(QuantizedConvTest, BiasAddRequantizeFusion) {
TestBiasAddFusion<qint8, qint8>(true, false);
}
TEST_F(QuantizedConvTest, BiasAddReluRequantizeFusion) {
TestBiasAddFusion<qint8, qint8>(true, false, "Relu");
}
TEST_F(QuantizedConvTest, UnsignedInputBiasAddReluRequantizeFusion) {
TestBiasAddFusion<quint8, quint8>(true, false, "Relu", 4.0);
}
TEST_F(QuantizedConvTest, DWBiasAddFusion) {
TestBiasAddFusion<qint8, qint32>(false, true);
}
TEST_F(QuantizedConvTest, DWBiasAddRequantizeFusion) {
TestBiasAddFusion<qint8, qint8>(true, true);
}
TEST_F(QuantizedConvTest, DWBiasAddReluRequantizeFusion) {
TestBiasAddFusion<qint8, qint8>(true, true, "Relu");
}
TEST_F(QuantizedConvTest, DWUnsignedInputBiasAddReluRequantizeFusion) {
TestBiasAddFusion<quint8, quint8>(true, true, "Relu", 4.0);
}
TEST_F(QuantizedConvTest, BiasAddSumReluRequantizeFusion) {
TestBiasAddSumActivationFusion<quint8, quint8>("Relu");
}
TEST_F(QuantizedConvTest, BiasAddSumReluRequantizeFusionSignedSummand) {
TestBiasAddSumActivationFusion<qint8, quint8>("Relu");
}
TEST_F(QuantizedConvTest, BiasAddSumReluFusionFloatSummand) {
TestBiasAddSumActivationFusion<float, qint32>("Relu");
}
}  // namespace tensorflow
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_quantized_conv_ops.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_quantized_conv_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
13a3f2e4-d5ef-4e08-b0cb-49f8f2fdec84 | cpp | tensorflow/tensorflow | exec_on_stall | tensorflow/core/util/exec_on_stall.h | tensorflow/core/util/exec_on_stall_test.cc | #ifndef TENSORFLOW_CORE_UTIL_EXEC_ON_STALL_H_
#define TENSORFLOW_CORE_UTIL_EXEC_ON_STALL_H_
#include <functional>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
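// Runs `f` on a background thread if this object is still alive once
// `delay_secs` have elapsed; destroying the object before the deadline
// disarms the callback (the destructor blocks until the polling closure has
// finished). Illustrative usage, modeled on the unit test (the callback body
// here is made up for the example):
//
//   auto watchdog = std::make_unique<ExecuteOnStall>(
//       /*delay_secs=*/60, [] { LOG(ERROR) << "operation appears stalled"; });
//   // ... potentially slow work ...
//   watchdog.reset();  // Finished in time; the callback will not fire.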
class ExecuteOnStall {
public:
ExecuteOnStall(int delay_secs, std::function<void()> f,
int32_t poll_microseconds = 100)
: disabled_(false),
joined_(false),
env_(Env::Default()),
f_(f),
poll_microseconds_(poll_microseconds) {
deadline_ = env_->NowMicros() + 1000000 * delay_secs;
env_->SchedClosure([this]() {
while (env_->NowMicros() < deadline_) {
{
mutex_lock l(mu_);
if (disabled_) {
break;
}
}
env_->SleepForMicroseconds(poll_microseconds_);
}
{
mutex_lock l(mu_);
if (!disabled_) {
f_();
}
joined_ = true;
cond_var_.notify_all();
}
});
}
~ExecuteOnStall() {
mutex_lock l(mu_);
disabled_ = true;
if (!joined_) {
cond_var_.wait(l);
}
}
private:
mutex mu_;
condition_variable cond_var_;
bool disabled_ TF_GUARDED_BY(mu_);
bool joined_ TF_GUARDED_BY(mu_);
Env* env_;
std::function<void()> f_;
int64_t deadline_;
int32 poll_microseconds_;
};
}  // namespace tensorflow
#endif | #include "tensorflow/core/util/exec_on_stall.h"
#include <functional>
#include <memory>
#include <utility>
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
struct Chunk {
std::unique_ptr<ExecuteOnStall> stall_closure;
};
Chunk* NewChunk(int stall_seconds, std::function<void()> f) {
Chunk* c = new Chunk;
c->stall_closure =
std::make_unique<ExecuteOnStall>(stall_seconds, std::move(f));
return c;
}
TEST(ExecuteOnStallTest, BothWays) {
mutex mu;
bool a_triggered(false);
bool b_triggered(false);
Chunk* a = NewChunk(1, [&mu, &a_triggered]() {
mutex_lock l(mu);
a_triggered = true;
});
Chunk* b = NewChunk(1, [&mu, &b_triggered]() {
mutex_lock l(mu);
b_triggered = true;
});
delete a;
Env::Default()->SleepForMicroseconds(2000000);
{
mutex_lock l(mu);
EXPECT_FALSE(a_triggered);
EXPECT_TRUE(b_triggered);
}
delete b;
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/exec_on_stall.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/exec_on_stall_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
511f9d16-bf58-4913-a3e8-d40d6b2c0fe5 | cpp | tensorflow/tensorflow | mkl_heuristics | tensorflow/core/util/mkl_heuristics.h | tensorflow/core/util/mkl_heuristics_test.cc | #ifndef TENSORFLOW_CORE_UTIL_MKL_HEURISTICS_H_
#define TENSORFLOW_CORE_UTIL_MKL_HEURISTICS_H_
#ifdef INTEL_MKL
#include <vector>
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tsl/platform/cpu_info.h"
namespace tensorflow {
struct RewriteThreshold {
std::string op;
int cpu_family;
int cpu_model_num;
struct PerformanceParameters {
double thread_sync_cost;
double framework_cost;
} params;
};
static const RewriteThreshold rewrite_thresholds[] = {
#ifdef DNNL_AARCH64_USE_ACL
{"Conv2D", 0x41, 0xd40, {0.9349, 22.603}},
{"_FusedConv2D", 0x41, 0xd40, {0.9349, 22.603}},
{"FusedBatchNormV3", 0x41, 0xd40, {0.3223, -0.8822}},
{"Sigmoid", 0x41, 0xd40, {0.0, 0.064736}},
#endif
{"", 0x0, 0x0, {0, 0}}};
static double FindRewriteThreshold(const string node_name, int threads) {
int cpu_family_ = tsl::port::CPUFamily();
int cpu_model_num_ = tsl::port::CPUModelNum();
if (threads == 0) {
return 0;
}
for (const RewriteThreshold* i = rewrite_thresholds;
i->op != "" && threads > 0; i++) {
if (node_name == i->op && cpu_family_ == i->cpu_family &&
cpu_model_num_ == i->cpu_model_num) {
return i->params.thread_sync_cost * threads + i->params.framework_cost;
}
}
return 0;
}
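// Estimates a node's work in MFLOPs from its "_input_shapes" attribute:
// Conv2D/_FusedConv2D use N*H*W*C * Kh*Kw*Cout / 1e6, while
// FusedBatchNormV3/Sigmoid use N*H*W*C / 1e6; returns -1 if the shapes are
// missing or cannot be parsed. For example, an 8x32x32x3 input convolved
// with a 3x3x3x64 filter yields 8*32*32*3*3*3*64 / 1e6 ~= 14.16 MFLOPs, as
// exercised by the unit test.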
static double CalculateNodeMFlops(const AttrSlice& attrs,
const string node_name) {
std::vector<const TensorShapeProto*> shape_attrs;
if (!TryGetNodeAttr(attrs, "_input_shapes", &shape_attrs)) {
return -1;
}
if ((node_name == "Conv2D" || node_name == "_FusedConv2D") &&
shape_attrs.size() == 2) {
TensorShape input_shape, filter_shape;
if (TensorShape::BuildTensorShape(*shape_attrs[0], &input_shape) !=
tsl::OkStatus()) {
return -1;
}
if (TensorShape::BuildTensorShape(*shape_attrs[1], &filter_shape) !=
tsl::OkStatus()) {
return -1;
}
return input_shape.dim_size(0) * input_shape.dim_size(1) *
input_shape.dim_size(2) * input_shape.dim_size(3) *
filter_shape.dim_size(0) * filter_shape.dim_size(1) *
filter_shape.dim_size(3) / (double)1e6;
} else if ((node_name == "FusedBatchNormV3" || node_name == "Sigmoid") &&
shape_attrs.size() >= 1) {
TensorShape input_shape;
if (TensorShape::BuildTensorShape(*shape_attrs[0], &input_shape) !=
tsl::OkStatus()) {
return -1;
}
return input_shape.dim_size(0) * input_shape.dim_size(1) *
input_shape.dim_size(2) * input_shape.dim_size(3) / (double)1e6;
}
return -1;
}
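// Shape-based heuristic for rewriting (Fused)MatMul on Arm Neoverse V1:
// returns true (allow the rewrite) when not running on Neoverse V1 or when
// "_input_shapes" is unavailable, and otherwise compares M, N and the total
// work M*N*K against empirically chosen thresholds, returning false for
// problem sizes where the rewrite is not expected to pay off.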
static bool MatMulHeuristic(const Node* n) {
if (!tsl::port::TestAarch64CPU(tsl::port::Aarch64CPU::ARM_NEOVERSE_V1)) {
return true;
}
std::vector<const TensorShapeProto*> shape_attrs;
if (!TryGetNodeAttr(n->attrs(), "_input_shapes", &shape_attrs)) {
return true;
}
if ((n->type_string() == "MatMul" || n->type_string() == "_FusedMatMul")) {
TensorShape lhs_shape, rhs_shape;
if (TensorShape::BuildTensorShape(*shape_attrs[0], &lhs_shape) !=
tsl::OkStatus()) {
return true;
}
if (TensorShape::BuildTensorShape(*shape_attrs[1], &rhs_shape) !=
tsl::OkStatus()) {
return true;
}
auto M = lhs_shape.dim_size(0);
auto K = lhs_shape.dim_size(1);
auto N = rhs_shape.dim_size(1);
auto ops = M * N * K;
std::array<int, 3> n_threshold = {7560, 250, 1536};
std::array<int, 2> m_threshold = {378, 80};
std::array<int, 2> ops_threshold = {5242880, 1090519040};
if (N <= n_threshold.at(0)) {
if (ops <= ops_threshold.at(0)) {
if (M <= m_threshold.at(0)) {
return false;
} else {
if (N <= n_threshold.at(1)) {
return false;
} else {
return true;
}
}
} else {
if (M <= m_threshold.at(1)) {
if (N <= n_threshold.at(2)) {
return true;
} else {
return false;
}
} else {
if (ops <= ops_threshold.at(1)) {
return true;
} else {
return false;
}
}
}
} else {
return false;
}
}
return true;
}
}  // namespace tensorflow
#endif  // INTEL_MKL
#endif | #ifdef INTEL_MKL
#define EIGEN_USE_THREADS
#include "tensorflow/core/util/mkl_heuristics.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(MklHeuristicsTest, MklCalculateMFlops) {
int batch = 8;
int width = 32;
int height = 32;
int in_depth = 3;
int filter_h = 3;
int filter_w = 3;
int out_depth = 64;
AttrValue attr_input_shape;
TensorShapeProto* proto = attr_input_shape.mutable_list()->add_shape();
proto->add_dim()->set_size(batch);
proto->add_dim()->set_size(width);
proto->add_dim()->set_size(height);
proto->add_dim()->set_size(in_depth);
proto = attr_input_shape.mutable_list()->add_shape();
proto->add_dim()->set_size(filter_h);
proto->add_dim()->set_size(filter_w);
proto->add_dim()->set_size(in_depth);
proto->add_dim()->set_size(out_depth);
NodeDef ndef;
double calculated_empty_mflops =
CalculateNodeMFlops(AttrSlice(ndef), "Conv2D");
EXPECT_EQ(calculated_empty_mflops, -1);
(*ndef.mutable_attr())["_input_shapes"] = attr_input_shape;
double conv_calculated_mflops =
CalculateNodeMFlops(AttrSlice(ndef), "Conv2D");
double expected_conv_mflops = batch * width * height * in_depth * filter_h *
filter_w * out_depth / static_cast<double>(1e6);
EXPECT_EQ(conv_calculated_mflops, expected_conv_mflops);
double fused_calculated_mflops =
CalculateNodeMFlops(AttrSlice(ndef), "_FusedConv2D");
  EXPECT_EQ(fused_calculated_mflops, expected_conv_mflops);
double sigmoid_calculated_mflops =
CalculateNodeMFlops(AttrSlice(ndef), "Sigmoid");
double expected_sigmoid_mflops =
batch * width * height * in_depth / static_cast<double>(1e6);
EXPECT_EQ(sigmoid_calculated_mflops, expected_sigmoid_mflops);
}
#ifdef DNNL_AARCH64_USE_ACL
TEST(MklHeuristicsTest, MklThresholds) {
int cpu_family = tsl::port::CPUFamily();
int cpu_model_num = tsl::port::CPUModelNum();
int neoverse_v1_family = 0x41;
int neoverse_v1_model = 0xd40;
string op_type = "Conv2D";
if (neoverse_v1_family == cpu_family && neoverse_v1_model == cpu_model_num) {
double thread_sync_cost = -1;
double framework_cost = -1;
for (const RewriteThreshold* i = rewrite_thresholds; i->op != ""; i++) {
if (i->op == op_type) {
thread_sync_cost = i->params.thread_sync_cost;
framework_cost = i->params.framework_cost;
break;
}
}
EXPECT_NE(thread_sync_cost, -1);
    EXPECT_NE(framework_cost, -1);
int no_threads = 0;
double calculated_threshold_zero_threads =
FindRewriteThreshold(op_type, no_threads);
EXPECT_EQ(calculated_threshold_zero_threads, 0);
int threads = 8;
double calculated_threshold = FindRewriteThreshold(op_type, threads);
double expected_threshold = threads * thread_sync_cost + framework_cost;
EXPECT_EQ(expected_threshold, calculated_threshold);
}
}
#endif
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/mkl_heuristics.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/mkl_heuristics_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0c1c9d13-d0f8-48d2-8b19-bb7cb1d46d0c | cpp | tensorflow/tensorflow | reffed_status_callback | tensorflow/core/util/reffed_status_callback.h | tensorflow/core/util/reffed_status_callback_test.cc | #ifndef TENSORFLOW_CORE_UTIL_REFFED_STATUS_CALLBACK_H_
#define TENSORFLOW_CORE_UTIL_REFFED_STATUS_CALLBACK_H_
#include <utility>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
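// Refcounted holder for a StatusCallback. Callers on multiple threads can
// Ref()/Unref() the object and record partial results via UpdateStatus(); the
// wrapped callback is invoked exactly once, with the combined summary status,
// when the last reference is dropped.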
class ReffedStatusCallback : public core::RefCounted {
public:
explicit ReffedStatusCallback(StatusCallback done) : done_(std::move(done)) {}
void UpdateStatus(const Status& s) {
mutex_lock lock(mu_);
status_group_.Update(s);
}
bool ok() {
tf_shared_lock lock(mu_);
return status_group_.ok();
}
Status status() {
tf_shared_lock lock(mu_);
return status_group_.as_summary_status();
}
~ReffedStatusCallback() override { done_(status_group_.as_summary_status()); }
private:
StatusCallback done_;
mutex mu_;
StatusGroup status_group_ TF_GUARDED_BY(mu_);
};
}
#endif | #include "tensorflow/core/util/reffed_status_callback.h"
#include <atomic>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
TEST(TestReffedStatusCallback, CallsBackOK) {
bool called = false;
Status status = absl::InvalidArgumentError("");
auto done = [&called, &status](const Status& s) {
called = true;
status = s;
};
auto* cb = new ReffedStatusCallback(std::move(done));
EXPECT_FALSE(called);
cb->Unref();
EXPECT_TRUE(called);
EXPECT_TRUE(status.ok());
}
TEST(TestReffedStatusCallback, CallsBackFail) {
bool called = false;
Status status = absl::OkStatus();
auto done = [&called, &status](const Status& s) {
called = true;
status = s;
};
auto* cb = new ReffedStatusCallback(std::move(done));
cb->UpdateStatus(absl::InternalError("1"));
cb->UpdateStatus(absl::InvalidArgumentError("2"));
EXPECT_FALSE(called);
cb->Unref();
EXPECT_TRUE(called);
EXPECT_THAT(status.code(),
::testing::AnyOf(error::INTERNAL, error::INVALID_ARGUMENT));
EXPECT_TRUE(absl::StrContains(status.message(), "1"));
EXPECT_TRUE(absl::StrContains(status.message(), "2"));
}
TEST(TestReffedStatusCallback, RefMulti) {
  bool called = false;
Status status = absl::OkStatus();
auto done = [&called, &status](const Status& s) {
called = true;
status = s;
};
auto* cb = new ReffedStatusCallback(std::move(done));
cb->Ref();
cb->UpdateStatus(absl::InternalError("1"));
cb->Ref();
cb->UpdateStatus(absl::InternalError("2"));
cb->Unref();
cb->Unref();
EXPECT_FALSE(called);
cb->Unref();
EXPECT_TRUE(called);
EXPECT_TRUE(absl::StrContains(status.message(), "1"));
EXPECT_TRUE(absl::StrContains(status.message(), "2"));
}
TEST(TestReffedStatusCallback, MultiThreaded) {
std::atomic<int> num_called(0);
Status status;
Notification n;
auto done = [&num_called, &status, &n](const Status& s) {
++num_called;
status = s;
n.Notify();
};
auto* cb = new ReffedStatusCallback(std::move(done));
thread::ThreadPool threads(Env::Default(), "test", 3);
for (int i = 0; i < 5; ++i) {
cb->Ref();
threads.Schedule([cb]() {
cb->UpdateStatus(absl::InvalidArgumentError("err"));
cb->Unref();
});
}
cb->Unref();
n.WaitForNotification();
EXPECT_EQ(num_called.load(), 1);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(status.message(), "err"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/reffed_status_callback.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/reffed_status_callback_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eb44438c-84cc-487a-a607-7f89b0335637 | cpp | tensorflow/tensorflow | mkl_util | tensorflow/core/util/mkl_util.h | tensorflow/core/util/mkl_util_test.cc | #ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#ifdef INTEL_MKL
#include <list>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "dnnl.hpp"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/onednn_env_vars.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
#include "tensorflow/core/platform/mutex.h"
#endif
#include "xla/tsl/util/onednn_threadpool.h"
using dnnl::engine;
using dnnl::memory;
using dnnl::primitive;
using dnnl::reorder;
using dnnl::stream;
using CPUDevice = Eigen::ThreadPoolDevice;
using MemoryArgsMap = std::unordered_map<int, memory>;
using ReorderPd = dnnl::reorder::primitive_desc;
#ifdef _WIN32
typedef unsigned int uint;
#endif
namespace tensorflow {
typedef enum {
Dim_N = 0,
Dim_C = 1,
Dim_H = 2,
Dim_W = 3,
Dim_O = 0,
Dim_I = 1
} MklDnnDims;
typedef enum {
Dim3d_N = 0,
Dim3d_C = 1,
Dim3d_D = 2,
Dim3d_H = 3,
Dim3d_W = 4,
Dim3d_O = 0,
Dim3d_I = 1
} MklDnnDims3D;
typedef enum {
TF_2DFILTER_DIM_H = 0,
TF_2DFILTER_DIM_W = 1,
TF_2DFILTER_DIM_I = 2,
TF_2DFILTER_DIM_O = 3
} TFFilterDims2d;
typedef enum {
TF_3DFILTER_DIM_P = 0,
TF_3DFILTER_DIM_H = 1,
TF_3DFILTER_DIM_W = 2,
TF_3DFILTER_DIM_I = 3,
TF_3DFILTER_DIM_O = 4
} TFFilterDims3d;
typedef enum {
MKL_GROUP_FILTER_DIM_G = 0,
MKL_GROUP_FILTER_DIM_O = 1,
MKL_GROUP_FILTER_DIM_I = 2,
MKL_GROUP_FILTER_DIM_H = 3,
MKL_GROUP_FILTER_DIM_W = 4
} MklDnnFilterGroupDims;
enum class MklQuantization {
QUANTIZED_VERSION,
FP_VERSION,
};
static const int kSmallBatchSize = 32;
enum class OneDNNMathModeSetting {
kNone = 0,
kBF16,
};
inline OneDNNMathModeSetting SetFPMathMode() {
static OneDNNMathModeSetting math_mode = [] {
OneDNNMathModeSetting mode = OneDNNMathModeSetting::kNone;
if (FPMathModeSetting() == "BF16") {
if (dnnl::set_default_fpmath_mode(dnnl::fpmath_mode::bf16) ==
dnnl::status::success) {
mode = OneDNNMathModeSetting::kBF16;
}
}
return mode;
}();
return math_mode;
}
inline void execute_primitives(
std::vector<dnnl::primitive>& primitives, std::shared_ptr<stream> stream,
std::vector<std::unordered_map<int, memory>>& net_args) {
DCHECK_EQ(primitives.size(), net_args.size());
for (size_t i = 0; i < primitives.size(); ++i) {
primitives.at(i).execute(*stream, net_args.at(i));
}
}
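// Compatibility macros that paper over the differences between the oneDNN
// v2.x struct-style memory-descriptor API and the v3.x accessor-style API
// (selected via ENABLE_ONEDNN_V3), so the code below is written only once.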
#ifndef ENABLE_ONEDNN_V3
#define ARE_MEMORY_DESCS_EQUAL(md1, md2) dnnl_memory_desc_equal(&md1, &md2)
#define CREATE_MEMORY_DESC_USING_STRIDES dnnl_memory_desc_init_by_strides
#define GET_DATA_TYPE data_type
#define GET_DIMS dims
#define GET_INNER_BLKS format_desc.blocking.inner_blks
#define GET_INNER_DIMS(dims, dims_1) dims_1
#define GET_INNER_IDXS format_desc.blocking.inner_idxs
#define GET_INNER_NBLKS format_desc.blocking.inner_nblks
#define GET_MEMORY_DESC get_desc().data
#define GET_MEMORY_DESC_FLAGS extra.flags
#define GET_MEMORY_DESC_USING_MKLDNN_SHAPE_PTR GetMklLayout().data
#define GET_NDIMS ndims
#define GET_STRIDES format_desc.blocking.strides
#define GET_STRIDES_DIMS(dims, dims_outer_blocks) dims_outer_blocks
#define INIT_DIMS_FROM_DESC(in_dims, md) in_dims(md.dims, &md.dims[md.ndims])
#define MEMORY_DESC dnnl_memory_desc_t
#else
#define ARE_MEMORY_DESCS_EQUAL(md1, md2) md1 == md2
#define CREATE_MEMORY_DESC_USING_STRIDES dnnl_memory_desc_create_with_strides
#define GET_DATA_TYPE get_data_type()
#define GET_DIMS get_dims()
#define GET_INNER_BLKS get_inner_blks()
#define GET_INNER_DIMS(dims, dims_1) dims
#define GET_INNER_IDXS get_inner_idxs()
#define GET_INNER_NBLKS get_inner_nblks()
#define GET_MEMORY_DESC get_desc()
#define GET_MEMORY_DESC_FLAGS get_size()
#define GET_MEMORY_DESC_USING_MKLDNN_SHAPE_PTR GetMklLayout()
#define GET_NDIMS get_ndims()
#define GET_STRIDES get_strides()
#define GET_STRIDES_DIMS(dims, dims_outer_blocks) dims
#define INIT_DIMS_FROM_DESC(in_dims, md) in_dims = md.get_dims()
#define MEMORY_DESC memory::desc
#endif
enum class MklTensorFormat {
FORMAT_NHWC = 0,
FORMAT_NCHW = 1,
FORMAT_NDHWC = 2,
FORMAT_NCDHW = 3,
FORMAT_X = 4,
FORMAT_NC = 5,
FORMAT_TNC = 6,
FORMAT_BLOCKED = 7,
FORMAT_INVALID = 8,
};
memory::format_tag MklTensorFormatToMklDnnDataFormat(MklTensorFormat format);
TensorFormat MklDnn3DDataFormatToTFDataFormat(MklTensorFormat format);
TensorFormat MklDnnDataFormatToTFDataFormat(MklTensorFormat format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
Status CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype,
dnnl_memory_desc_t* blocked_md);
inline std::ostream& operator<<(std::ostream& os,
const memory::format_tag& tag) {
if (tag == memory::format_tag::undef) {
os << "undef";
} else if (tag == memory::format_tag::any) {
os << "any";
} else {
os << "invalid";
}
return os;
}
inline void operator<<(std::ostream& os, const MklTensorFormat& format) {
if (format == MklTensorFormat::FORMAT_NHWC) {
os << "FORMAT_NHWC";
} else if (format == MklTensorFormat::FORMAT_NCHW) {
os << "FORMAT_NCHW";
} else if (format == MklTensorFormat::FORMAT_NDHWC) {
os << "FORMAT_NDHWC";
} else if (format == MklTensorFormat::FORMAT_NCDHW) {
os << "FORMAT_NCDHW";
} else if (format == MklTensorFormat::FORMAT_X) {
os << "FORMAT_X";
} else if (format == MklTensorFormat::FORMAT_NC) {
os << "FORMAT_NC";
} else if (format == MklTensorFormat::FORMAT_TNC) {
os << "FORMAT_TNC";
} else if (format == MklTensorFormat::FORMAT_BLOCKED) {
os << "FORMAT_BLOCKED";
} else {
os << "INVALID FORMAT";
}
}
template <typename T>
inline bool array_cmp(const T* a1, const T* a2, size_t size) {
for (size_t i = 0; i < size; ++i)
if (a1[i] != a2[i]) return false;
return true;
}
inline dnnl::stream* CreateStream(tsl::OneDnnThreadPool* eigen_tp,
const engine& engine) {
#ifndef ENABLE_ONEDNN_OPENMP
if (eigen_tp != nullptr) {
stream* tp_stream =
new stream(dnnl::threadpool_interop::make_stream(engine, eigen_tp));
return tp_stream;
} else {
stream* tp_stream = new stream(engine);
return tp_stream;
}
#else
stream* tp_stream = new stream(engine);
return tp_stream;
#endif
}
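// Carries the oneDNN ("MKL") layout metadata that travels alongside an MKL
// tensor: whether the tensor is in an MKL layout, its logical TF shape and
// data type, the oneDNN memory descriptor, and the mapping between TF and
// oneDNN dimension orders. Serialized into the extra metadata tensor that
// MKL ops pass next to their data tensors.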
class MklDnnShape {
private:
struct MklShapeData {
bool is_mkl_tensor_ = false;
size_t dimension_ = 0;
dnnl_dims_t sizes_;
MklTensorFormat tf_data_format_ = MklTensorFormat::FORMAT_BLOCKED;
memory::data_type T_ = memory::data_type::undef;
MEMORY_DESC mkl_md_;
dnnl_dims_t map_;
};
MklShapeData data_;
typedef std::remove_extent<dnnl_dims_t>::type dnnl_dim_t;
#define INVALID_DIM_SIZE -1
public:
MklDnnShape() : data_{} {
for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
++i) {
data_.sizes_[i] = -1;
}
for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
data_.map_[i] = -1;
}
}
~MklDnnShape() {}
MklDnnShape(const MklDnnShape&) = delete;
void operator=(const MklDnnShape&) = delete;
inline bool operator==(const MklDnnShape& input_shape) const {
if (this->IsMklTensor() != input_shape.IsMklTensor()) {
return false;
}
if (this->IsMklTensor()) {
auto const& cur_md = this->GET_MEMORY_DESC_USING_MKLDNN_SHAPE_PTR;
auto const& input_shape_md =
input_shape.GET_MEMORY_DESC_USING_MKLDNN_SHAPE_PTR;
return (this->GetTfShape() == input_shape.GetTfShape()) &&
ARE_MEMORY_DESCS_EQUAL(cur_md, input_shape_md);
}
return true;
}
inline bool operator==(const TensorShape& input_shape) const {
if (!this->IsMklTensor()) {
return false;
}
return this->GetTfShape() == input_shape;
}
inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }
inline void SetMklTensor(bool is_mkl_tensor) {
data_.is_mkl_tensor_ = is_mkl_tensor;
}
inline void SetDimensions(const size_t dimension) {
data_.dimension_ = dimension;
}
inline size_t GetDimension(char dimension) const {
int index = GetMklDnnTensorDimIndex(dimension);
CHECK(index >= 0 && index < this->GetDimension())
<< "Invalid index from the dimension: " << index << ", " << dimension;
return this->DimSize(index);
}
inline size_t GetDimension3D(char dimension) const {
int index = GetMklDnnTensor3DDimIndex(dimension);
CHECK(index >= 0 && index < this->GetDimension())
<< "Invalid index from the dimension: " << index << ", " << dimension;
return this->DimSize(index);
}
inline int32 GetMklDnnTensorDimIndex(char dimension) const {
switch (dimension) {
case 'N':
return MklDnnDims::Dim_N;
case 'C':
return MklDnnDims::Dim_C;
case 'H':
return MklDnnDims::Dim_H;
case 'W':
return MklDnnDims::Dim_W;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1;
}
}
inline int32 GetMklDnnTensor3DDimIndex(char dimension) const {
switch (dimension) {
case 'N':
return MklDnnDims3D::Dim3d_N;
case 'C':
return MklDnnDims3D::Dim3d_C;
case 'D':
return MklDnnDims3D::Dim3d_D;
case 'H':
return MklDnnDims3D::Dim3d_H;
case 'W':
return MklDnnDims3D::Dim3d_W;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1;
}
}
inline size_t GetDimension() const { return data_.dimension_; }
inline const int* GetSizes() const {
return reinterpret_cast<const int*>(&data_.sizes_[0]);
}
inline memory::dims GetSizesAsMklDnnDims() const {
memory::dims retVal;
if (data_.is_mkl_tensor_) {
size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
for (size_t i = 0; i < dimensions; i++) {
if (data_.sizes_[i] != INVALID_DIM_SIZE)
retVal.push_back(data_.sizes_[i]);
}
} else {
CHECK_EQ(data_.is_mkl_tensor_, true);
}
return retVal;
}
inline int64 DimSize(int index) const {
CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
return data_.sizes_[index];
}
inline TensorShape GetTfShape() const {
CHECK_EQ(data_.is_mkl_tensor_, true);
std::vector<int32> shape(data_.dimension_, -1);
if (data_.tf_data_format_ != MklTensorFormat::FORMAT_BLOCKED) {
for (size_t idx = 0; idx < data_.dimension_; ++idx) {
shape[idx] = data_.sizes_[TfDimIdx(idx)];
}
} else {
for (size_t idx = 0; idx < data_.dimension_; ++idx) {
shape[idx] = data_.sizes_[idx];
}
}
TensorShape ts;
bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
CHECK_EQ(ret, true);
return ts;
}
inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
inline const memory::data_type GetElemType() { return data_.T_; }
#ifndef ENABLE_ONEDNN_V3
inline void SetMklLayout(memory::desc* md) {
CHECK_NOTNULL(md);
data_.mkl_md_ = md->data;
}
#else
inline void SetMklLayout(const memory::desc& md) { data_.mkl_md_ = md; }
#endif
inline const memory::desc GetMklLayout() const {
return memory::desc(data_.mkl_md_);
}
inline MklTensorFormat GetTfDataFormat() const {
return data_.tf_data_format_;
}
inline void SetTfLayout(size_t dims, const memory::dims& sizes,
MklTensorFormat format) {
DCHECK_EQ(dims, sizes.size())
<< "SetTfLayout: Number of dimensions does not"
"match with dimension array";
data_.dimension_ = dims;
for (size_t ii = 0; ii < dims; ++ii) {
data_.sizes_[ii] = sizes[ii];
}
data_.tf_data_format_ = format;
if (format != MklTensorFormat::FORMAT_BLOCKED) {
if (dims == 2) {
data_.map_[0] = MklDnnDims::Dim_N;
data_.map_[1] = MklDnnDims::Dim_C;
} else {
SetTfDimOrder(dims, format);
}
}
}
inline const memory::desc GetTfLayout() const {
memory::dims dims;
for (size_t ii = 0; ii < data_.dimension_; ++ii) {
dims.push_back(data_.sizes_[ii]);
}
if (data_.tf_data_format_ == MklTensorFormat::FORMAT_BLOCKED) {
auto strides = CalculateTFStrides(dims);
dnnl_memory_desc_t blocked_md;
TF_CHECK_OK(
CreateBlockedMemDescHelper(dims, strides, data_.T_, &blocked_md));
return memory::desc(blocked_md);
} else {
auto format_tag =
MklTensorFormatToMklDnnDataFormat(data_.tf_data_format_);
return memory::desc(dims, data_.T_, format_tag);
}
}
inline const memory::desc GetCurLayout() const {
return IsMklTensor() ? GetMklLayout() : GetTfLayout();
}
inline void SetTfDimOrder(const size_t dimension, const dnnl_dims_t map) {
CHECK(dimension == data_.dimension_);
for (size_t ii = 0; ii < dimension; ii++) {
data_.map_[ii] = map[ii];
}
}
inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
if (dimension == 5) {
CHECK(dimension == data_.dimension_);
data_.map_[GetTensorDimIndex<3>(data_format, '0')] =
MklDnnDims3D::Dim3d_D;
data_.map_[GetTensorDimIndex<3>(data_format, '1')] =
MklDnnDims3D::Dim3d_H;
data_.map_[GetTensorDimIndex<3>(data_format, '2')] =
MklDnnDims3D::Dim3d_W;
data_.map_[GetTensorDimIndex<3>(data_format, 'C')] =
MklDnnDims3D::Dim3d_C;
data_.map_[GetTensorDimIndex<3>(data_format, 'N')] =
MklDnnDims3D::Dim3d_N;
} else {
CHECK_EQ(dimension, 4);
CHECK(dimension == data_.dimension_);
data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
}
}
inline void SetTfDimOrder(const size_t dimension, MklTensorFormat format) {
TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
SetTfDimOrder(dimension, data_format);
}
inline const dnnl_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; }
inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
inline int64 TfDimSize(int index) const {
return data_.sizes_[TfDimIdx(index)];
}
inline bool IsMklChannelDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_C;
}
inline bool IsMklBatchDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_N;
}
inline bool IsMklWidthDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_W;
}
inline bool IsMklHeightDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_H;
}
inline bool IsTensorInNCHWFormat() const {
TensorFormat data_format = FORMAT_NCHW;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
inline bool IsTensorInNHWCFormat() const {
TensorFormat data_format = FORMAT_NHWC;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }
void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
CHECK(buf_size >= GetSerializeBufferSize())
<< "Buffer size is too small to SerializeMklDnnShape";
*reinterpret_cast<MklShapeData*>(buf) = data_;
}
void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
<< "Buffer size is too small in DeSerializeMklDnnShape";
const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
if (is_mkl_tensor) {
CHECK(buf_size >= GetSerializeBufferSize())
<< "Buffer size is too small in DeSerializeMklDnnShape";
data_ = *reinterpret_cast<const MklShapeData*>(buf);
}
}
};
inline Eigen::ThreadPoolInterface* EigenThreadPoolFromTfContext(
OpKernelContext* context) {
return context->device()
->tensorflow_cpu_worker_threads()
->workers->AsEigenThreadPool();
}
typedef std::vector<MklDnnShape> MklDnnShapeList;
template <typename T>
class MklDnnData;
inline void ExecutePrimitive(const std::vector<primitive>& net,
const std::vector<MemoryArgsMap>* net_args,
const engine& cpu_engine,
OpKernelContext* context = nullptr) {
DCHECK(net_args);
DCHECK_EQ(net.size(), net_args->size());
std::unique_ptr<stream> cpu_stream;
tsl::OneDnnThreadPool eigen_tp;
if (context != nullptr) {
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
eigen_tp =
tsl::OneDnnThreadPool(eigen_interface, ThreadPoolUseCallerThread());
cpu_stream.reset(CreateStream(&eigen_tp, cpu_engine));
} else {
cpu_stream.reset(CreateStream(nullptr, cpu_engine));
}
for (size_t i = 0; i < net.size(); ++i) {
net.at(i).execute(*cpu_stream, net_args->at(i));
}
cpu_stream->wait();
}
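// Converts a tensor in MKL (blocked) layout back to standard TF layout,
// reordering into a freshly allocated temp tensor when the layouts differ;
// if the input is not an MKL tensor it is forwarded unchanged.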
template <typename T>
inline Status ConvertMklToTF(OpKernelContext* context,
const Tensor& input_mkl_tensor,
const MklDnnShape& input_mkl_shape,
Tensor* output_tf_tensor) {
try {
if (!input_mkl_shape.IsMklTensor()) {
*output_tf_tensor = input_mkl_tensor;
return OkStatus();
}
TensorShape output_tf_shape = input_mkl_shape.GetTfShape();
TF_CHECK_OK(context->allocate_temp(DataTypeToEnum<T>::v(), output_tf_shape,
output_tf_tensor));
engine cpu_engine(engine::kind::cpu, 0);
MklDnnData<T> input(&cpu_engine);
auto input_mkl_md = input_mkl_shape.GetMklLayout();
auto output_tf_md = input_mkl_shape.GetTfLayout();
input.SetUsrMem(input_mkl_md, &input_mkl_tensor);
if (input.IsReorderNeeded(output_tf_md)) {
std::vector<primitive> net;
std::vector<MemoryArgsMap> net_args;
bool status = input.CheckReorderToOpMem(output_tf_md, output_tf_tensor,
net, net_args, cpu_engine);
if (!status) {
return absl::InternalError(
"ConvertMklToTF(): Failed to create reorder for input");
}
ExecutePrimitive(net, &net_args, cpu_engine, context);
} else {
bool status =
output_tf_tensor->CopyFrom(input_mkl_tensor, output_tf_shape);
if (!status) {
return absl::InternalError(
"ConvertMklToTF(): Failed to forward input tensor to output");
}
}
return OkStatus();
} catch (dnnl::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
LOG(FATAL) << "Operation received an exception: " << error_msg;
}
}
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape,
bool eager_mode) {
if (!eager_mode) {
mklshape->DeSerializeMklDnnShape(
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
.flat<uint8>()
.data(),
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
.flat<uint8>()
.size() *
sizeof(uint8));
} else {
mklshape->SetMklTensor(false);
}
}
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
GetMklShape(ctext, n, mklshape, false);
}
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
return ctext->input(GetTensorDataIndex(n, ctext->num_inputs()));
}
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
OpInputList* input_tensors) {
CHECK_NOTNULL(input_tensors);
TF_CHECK_OK(ctext->input_list(name, input_tensors));
}
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
MklDnnShapeList* mkl_shapes,
bool native_format = false) {
if (!native_format) {
OpInputList input_mkl_tensors;
GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);
for (int i = 0; i < input_mkl_tensors.size(); i++) {
(*mkl_shapes)[i].DeSerializeMklDnnShape(
input_mkl_tensors[i].flat<uint8>().data(),
input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
}
} else {
for (int i = 0; i < mkl_shapes->size(); ++i) {
(*mkl_shapes)[i].SetMklTensor(false);
}
}
}
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx,
bool eager_mode = false) {
CHECK_NOTNULL(context);
CHECK_LT(input_idx, context->num_inputs());
MklDnnShape input_mkl_shape;
GetMklShape(context, input_idx, &input_mkl_shape, eager_mode);
if (input_mkl_shape.IsMklTensor() && !eager_mode) {
return input_mkl_shape.GetTfShape();
} else {
const Tensor& t = MklGetInput(context, input_idx);
return t.shape();
}
}
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
const MklDnnShape& mkl_shape) {
Tensor* second_tensor = nullptr;
TensorShape second_shape;
second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
OP_REQUIRES_OK(ctext, ctext->allocate_output(
GetTensorMetaDataIndex(n, ctext->num_outputs()),
second_shape, &second_tensor));
mkl_shape.SerializeMklDnnShape(
second_tensor->flat<uint8>().data(),
second_tensor->flat<uint8>().size() * sizeof(uint8));
}
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
Tensor** output,
const TensorShape& tf_shape,
const MklDnnShape& mkl_shape,
bool eager_mode = false) {
OP_REQUIRES_OK(
ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
tf_shape, output));
if (!eager_mode) {
Tensor* second_tensor = nullptr;
TensorShape second_shape;
second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
OP_REQUIRES_OK(ctext, ctext->allocate_output(
GetTensorMetaDataIndex(n, ctext->num_outputs()),
second_shape, &second_tensor));
mkl_shape.SerializeMklDnnShape(
second_tensor->flat<uint8>().data(),
second_tensor->flat<uint8>().size() * sizeof(uint8));
}
}
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
const memory::desc& pd, void** buf_out) {
TensorShape tf_shape;
tf_shape.AddDim(pd.get_size() / sizeof(T) + 1);
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
tf_shape, tensor_out));
*buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
TensorShape tf_shape) {
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
tf_shape, tensor_out));
}
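// Optional user-provided scratchpad for oneDNN primitives: AllocateSPTensor()
// queries the primitive's scratchpad descriptor and, if space is needed,
// allocates a temporary TF tensor of at least that many bytes; Get() returns
// the raw buffer, or nullptr when no scratchpad is required.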
template <typename T>
struct UserScratchPad {
template <typename MklPrim>
inline void AllocateSPTensor(MklPrim* mkl_prim, OpKernelContext* context) {
allocated_ = false;
auto spad_md = mkl_prim->GetScratchPadDesc();
size_t spad_size = spad_md.get_size();
if (spad_size == 0) return;
size_t allocate_size = (spad_size + sizeof(T) - 1) / sizeof(T);
TensorShape tf_shape;
tf_shape.AddDim(allocate_size);
AllocTmpBuffer<T>(context, &scratch_pad_, tf_shape);
allocated_ = true;
}
inline void* Get() {
if (allocated_) {
return static_cast<void*>(scratch_pad_.flat<T>().data());
} else {
return nullptr;
}
}
private:
Tensor scratch_pad_;
bool allocated_ = false;
};
inline void GetStridesFromSizes(MklTensorFormat data_format, size_t* strides,
const size_t* sizes) {
DCHECK_NE(data_format, MklTensorFormat::FORMAT_INVALID);
if (data_format == MklTensorFormat::FORMAT_NHWC) {
strides[0] = sizes[2];
strides[1] = sizes[0] * sizes[2];
strides[2] = 1;
strides[3] = sizes[0] * sizes[1] * sizes[2];
} else {
strides[0] = 1;
strides[1] = sizes[0];
strides[2] = sizes[0] * sizes[1];
strides[3] = sizes[0] * sizes[1] * sizes[2];
}
}
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);
const Tensor& data = context->input(idx_data_in);
const Tensor& meta = context->input(idx_meta_in);
Tensor output(data.dtype());
Tensor meta_output(meta.dtype());
CHECK(output.CopyFrom(data, data.shape()));
CHECK(meta_output.CopyFrom(meta, meta.shape()));
context->set_output(idx_data_out, output);
context->set_output(idx_meta_out, meta_output);
}
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
int idx_out,
const TensorShape& shape) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
const Tensor& data = context->input(idx_data_in);
MklDnnShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
Tensor output(data.dtype());
CHECK(output.CopyFrom(data, shape));
context->set_output(idx_data_out, output);
}
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
MklDnnShape dnn_shape_output;
dnn_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
} else {
context->set_output(idx_data_out, context->input(idx_data_in));
}
}
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
} else {
context->set_output(idx_data_out, context->input(idx_data_in));
context->set_output(idx_meta_out, context->input(idx_meta_in));
}
}
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
uint32 idx_data_out) {
MklDnnShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}
inline bool ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
int idx_in, int idx_out,
Tensor** output,
const MklDnnShape& mkl_shape,
bool always_forward = true) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
bool is_forwarded = false;
const Tensor& input_tensor = context->input(idx_data_in);
const auto output_shape = input_tensor.shape();
if (always_forward) {
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
} else {
context->set_output(idx_data_out, input_tensor);
}
} else {
is_forwarded = context->forward_input_to_output_with_shape(
idx_data_in, idx_data_out, output_shape, output);
}
if (is_forwarded || always_forward) {
AllocateOutputSetMklShape(context, idx_out, mkl_shape);
return true;
}
return false;
}
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
uint32 idx_data_in,
uint32_t idx_data_out) {
uint32 idx_meta_in =
GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
uint32 idx_meta_out =
GetTensorMetaDataIndex(idx_data_out, context->num_outputs());
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
} else {
context->set_output(idx_meta_out, context->input(idx_meta_in));
}
}
inline Tensor GetMklMetaTensor() {
MklDnnShape non_mkl_shape;
non_mkl_shape.SetMklTensor(false);
auto size = static_cast<int64_t>(non_mkl_shape.GetSerializeBufferSize());
Tensor tensor(DT_UINT8, {size});
non_mkl_shape.SerializeMklDnnShape(tensor.flat<uint8>().data(),
size * sizeof(uint8));
return tensor;
}
template <typename T>
static memory::data_type MklDnnType();
template <>
memory::data_type MklDnnType<float>() {
return memory::data_type::f32;
}
template <>
memory::data_type MklDnnType<quint8>() {
return memory::data_type::u8;
}
template <>
memory::data_type MklDnnType<uint8>() {
return memory::data_type::u8;
}
template <>
memory::data_type MklDnnType<qint8>() {
return memory::data_type::s8;
}
template <>
memory::data_type MklDnnType<qint32>() {
return memory::data_type::s32;
}
template <>
memory::data_type MklDnnType<bfloat16>() {
return memory::data_type::bf16;
}
template <>
memory::data_type MklDnnType<Eigen::half>() {
return memory::data_type::f16;
}
inline memory::format_tag MklTensorFormatToMklDnnDataFormat(
MklTensorFormat format) {
if (format == MklTensorFormat::FORMAT_NHWC) return memory::format_tag::nhwc;
if (format == MklTensorFormat::FORMAT_NCHW) return memory::format_tag::nchw;
if (format == MklTensorFormat::FORMAT_NDHWC) return memory::format_tag::ndhwc;
if (format == MklTensorFormat::FORMAT_NCDHW) return memory::format_tag::ncdhw;
if (format == MklTensorFormat::FORMAT_X) return memory::format_tag::x;
if (format == MklTensorFormat::FORMAT_NC) return memory::format_tag::nc;
if (format == MklTensorFormat::FORMAT_TNC) return memory::format_tag::tnc;
return memory::format_tag::undef;
}
inline MklTensorFormat TFDataFormatToMklDnn3DDataFormat(TensorFormat format) {
if (format == FORMAT_NHWC) return MklTensorFormat::FORMAT_NDHWC;
if (format == FORMAT_NCHW) return MklTensorFormat::FORMAT_NCDHW;
TF_CHECK_OK(absl::InvalidArgumentError("Unsupported data format"));
return MklTensorFormat::FORMAT_INVALID;
}
inline MklTensorFormat TFDataFormatToMklDnnDataFormat(TensorFormat format) {
if (format == FORMAT_NHWC) return MklTensorFormat::FORMAT_NHWC;
if (format == FORMAT_NCHW) return MklTensorFormat::FORMAT_NCHW;
TF_CHECK_OK(absl::InvalidArgumentError("Unsupported data format"));
return MklTensorFormat::FORMAT_INVALID;
}
inline TensorFormat MklDnnDataFormatToTFDataFormat(MklTensorFormat format) {
if (format == MklTensorFormat::FORMAT_NHWC ||
format == MklTensorFormat::FORMAT_NDHWC)
return FORMAT_NHWC;
if (format == MklTensorFormat::FORMAT_NCHW ||
format == MklTensorFormat::FORMAT_NCDHW)
return FORMAT_NCHW;
TF_CHECK_OK(absl::InvalidArgumentError("Unsupported data format"));
return FORMAT_NHWC;
}
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
memory::dims dims(shape.dims());
for (int d = 0; d < shape.dims(); ++d) {
dims[d] = shape.dim_size(d);
}
return dims;
}
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
TensorFormat format) {
DCHECK_NE(TFDataFormatToMklDnnDataFormat(format),
MklTensorFormat::FORMAT_INVALID);
int n = shape.dim_size(GetTensorDimIndex(format, 'N'));
int c = shape.dim_size(GetTensorDimIndex(format, 'C'));
int h = shape.dim_size(GetTensorDimIndex(format, 'H'));
int w = shape.dim_size(GetTensorDimIndex(format, 'W'));
return memory::dims({n, c, h, w});
}
inline memory::dims TFShapeToMklDnnDimsInNCDHW(const TensorShape& shape,
TensorFormat format) {
DCHECK_NE(TFDataFormatToMklDnn3DDataFormat(format),
MklTensorFormat::FORMAT_INVALID);
int n = shape.dim_size(GetTensorDimIndex<3>(format, 'N'));
int c = shape.dim_size(GetTensorDimIndex<3>(format, 'C'));
int d = shape.dim_size(GetTensorDimIndex<3>(format, '0'));
int h = shape.dim_size(GetTensorDimIndex<3>(format, '1'));
int w = shape.dim_size(GetTensorDimIndex<3>(format, '2'));
return memory::dims({n, c, d, h, w});
}
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
TensorFormat format) {
DCHECK_NE(TFDataFormatToMklDnnDataFormat(format),
MklTensorFormat::FORMAT_INVALID);
int n = in_dims[GetTensorDimIndex(format, 'N')];
int c = in_dims[GetTensorDimIndex(format, 'C')];
int h = in_dims[GetTensorDimIndex(format, 'H')];
int w = in_dims[GetTensorDimIndex(format, 'W')];
return memory::dims({n, c, h, w});
}
inline memory::dims MklDnnDimsInNCDHW(const memory::dims& in_dims,
TensorFormat format) {
DCHECK_NE(TFDataFormatToMklDnnDataFormat(format),
MklTensorFormat::FORMAT_INVALID);
int n = in_dims[GetTensorDimIndex<3>(format, 'N')];
int c = in_dims[GetTensorDimIndex<3>(format, 'C')];
int d = in_dims[GetTensorDimIndex<3>(format, '0')];
int h = in_dims[GetTensorDimIndex<3>(format, '1')];
int w = in_dims[GetTensorDimIndex<3>(format, '2')];
return memory::dims({n, c, d, h, w});
}
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
std::vector<int32> shape(dims.size(), -1);
for (int d = 0; d < dims.size(); d++) {
shape[d] = dims[d];
}
TensorShape ret;
CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true);
return ret;
}
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
CHECK_GT(dims_tf_order.size(), 0);
memory::dims strides(dims_tf_order.size());
int last_dim_idx = dims_tf_order.size() - 1;
strides[last_dim_idx] = 1;
for (int d = last_dim_idx - 1; d >= 0; d--) {
strides[d] = strides[d + 1] * dims_tf_order[d + 1];
}
return strides;
}
inline Status CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype,
dnnl_memory_desc_t* blocked_md) {
DCHECK_EQ(dim.size(), strides.size());
const int kNumDims = dim.size();
dnnl_dim_t* input_dims = new dnnl_dim_t[kNumDims];
dnnl_dim_t* input_strides = new dnnl_dim_t[kNumDims];
for (int i = 0; i < kNumDims; ++i) {
input_dims[i] = dim[i];
input_strides[i] = strides[i];
}
try {
CREATE_MEMORY_DESC_USING_STRIDES(blocked_md, kNumDims, input_dims,
memory::convert_to_c(dtype),
input_strides);
delete[] input_dims;
delete[] input_strides;
} catch (dnnl::error& e) {
delete[] input_dims;
delete[] input_strides;
return absl::InternalError(
absl::StrCat("Failed to create blocked memory descriptor.",
"Status: ", e.status, ", message: ", e.message));
}
return OkStatus();
}
inline void CreateAndExecuteReorder(const ReorderPd& reorder_desc,
const memory& src_mem,
const memory& dst_mem, const engine& engine,
OpKernelContext* ctx = nullptr,
memory* scale_mem = nullptr) {
std::vector<primitive> net;
net.push_back(dnnl::reorder(reorder_desc));
std::vector<MemoryArgsMap> net_args;
#ifndef ENABLE_ONEDNN_V3
net_args.push_back({{DNNL_ARG_FROM, src_mem}, {DNNL_ARG_TO, dst_mem}});
#else
if (scale_mem != nullptr) {
net_args.push_back({{DNNL_ARG_FROM, src_mem},
{DNNL_ARG_TO, dst_mem},
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST, *scale_mem}});
} else {
net_args.push_back({{DNNL_ARG_FROM, src_mem}, {DNNL_ARG_TO, dst_mem}});
}
#endif
ExecutePrimitive(net, &net_args, engine, ctx);
}
class MklReorderPrimitive;
template <typename T>
inline MklReorderPrimitive* FindOrCreateReorder(const memory* from,
const memory* to);
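// Wraps the user-provided (TF-layout) memory for one input or output of an op
// and, when the op expects a different oneDNN layout, creates the reorder
// memory and reorder primitives needed to convert between the two (see the
// CheckReorderToOpMem/InsertReorderToUserMem members below).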
template <typename T>
class MklDnnData {
private:
memory* user_memory_;
memory* reorder_memory_;
memory::desc* op_md_;
bool bIs3D;
void* allocated_buffer_;
const engine* cpu_engine_;
public:
explicit MklDnnData(const engine* e)
: user_memory_(nullptr),
reorder_memory_(nullptr),
op_md_(nullptr),
bIs3D(false),
allocated_buffer_(nullptr),
cpu_engine_(e) {}
MklDnnData(const MklDnnData&) = default;
MklDnnData& operator=(const MklDnnData&) = delete;
~MklDnnData() {
if (allocated_buffer_ != nullptr) {
cpu_allocator()->DeallocateRaw(allocated_buffer_);
}
cpu_engine_ = nullptr;
delete (user_memory_);
delete (reorder_memory_);
delete (op_md_);
}
inline void* GetTensorBuffer(const Tensor* tensor) const {
CHECK_NOTNULL(tensor);
return const_cast<void*>(
static_cast<const void*>(tensor->flat<T>().data()));
}
void SetIs3DData(bool bIs3D_) { bIs3D = bIs3D_; }
bool GetIs3D() { return bIs3D; }
inline void SetUsrMem(const memory::dims& dim, memory::format_tag fm,
void* data_buffer = nullptr) {
auto md = memory::desc(dim, MklDnnType<T>(), fm);
SetUsrMem(md, data_buffer);
}
inline void SetUsrMem(const memory::dims& dim, memory::format_tag fm,
const Tensor* tensor) {
DCHECK(tensor);
SetUsrMem(dim, fm, GetTensorBuffer(tensor));
}
static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
const memory::dims& strides) {
dnnl_memory_desc_t blocked_md;
TF_CHECK_OK(
CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>(), &blocked_md));
return memory::desc(blocked_md);
}
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
void* data_buffer = nullptr) {
CHECK_EQ(dim.size(), strides.size());
auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
SetUsrMem(blocked_md, data_buffer);
}
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(dim, strides, GetTensorBuffer(tensor));
}
inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(md, GetTensorBuffer(tensor));
}
inline void SetUsrMem(const memory::desc& pd, void* data_buffer = nullptr) {
DCHECK(cpu_engine_);
if (user_memory_) delete user_memory_;
if (data_buffer) {
user_memory_ = new memory(pd, *cpu_engine_, data_buffer);
} else {
user_memory_ = new memory(pd, *cpu_engine_);
}
}
inline const memory* GetUsrMem() const { return user_memory_; }
inline memory::desc GetUsrMemDesc() const {
DCHECK(user_memory_);
return user_memory_->get_desc();
}
inline void* GetUsrMemDataHandle() const {
CHECK_NOTNULL(user_memory_);
return user_memory_->get_data_handle();
}
inline void SetUsrMemDataHandle(void* data_buffer,
std::shared_ptr<stream> t_stream = nullptr) {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(data_buffer);
#if !defined(ENABLE_ONEDNN_OPENMP) && !defined(ENABLE_ONEDNN_V3)
user_memory_->set_data_handle(data_buffer, *t_stream);
#else
user_memory_->set_data_handle(data_buffer);
#endif
}
inline void SetUsrMemDataHandle(const Tensor* tensor,
std::shared_ptr<stream> t_stream = nullptr) {
SetUsrMemDataHandle(GetTensorBuffer(tensor), t_stream);
}
inline void AllocateBuffer(size_t size) {
const int64 kMemoryAlignment = 64;
allocated_buffer_ = cpu_allocator()->AllocateRaw(kMemoryAlignment, size);
}
inline void* GetAllocatedBuffer() { return allocated_buffer_; }
inline const memory& GetOpMem() const {
return reorder_memory_ ? *reorder_memory_ : *user_memory_;
}
inline void SetOpMemDesc(const memory::dims& dim, memory::format_tag fm) {
op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
}
inline const memory::desc& GetOpMemDesc() const { return *op_md_; }
inline bool IsReorderNeeded(const memory::desc& op_pd) const {
DCHECK(user_memory_);
return op_pd != user_memory_->get_desc();
}
inline primitive CreateReorder(const memory* from, const memory* to) const {
CHECK_NOTNULL(from);
CHECK_NOTNULL(to);
return reorder(*from, *to);
}
inline bool CheckReorderToOpMem(const memory::desc& op_md,
std::vector<primitive>& net,
std::vector<MemoryArgsMap>& net_args,
const engine& engine) {
DCHECK(user_memory_);
DCHECK_EQ(net.size(), net_args.size());
if (IsReorderNeeded(op_md)) {
reorder_memory_ = new memory(op_md, engine);
net.push_back(CreateReorder(user_memory_, reorder_memory_));
net_args.push_back(MemoryArgsMap{{DNNL_ARG_FROM, *user_memory_},
{DNNL_ARG_TO, *reorder_memory_}});
return true;
}
return false;
}
inline bool CheckReorderToOpMem(const memory::desc& op_md,
const engine& engine,
OpKernelContext* context = nullptr) {
DCHECK(user_memory_);
if (IsReorderNeeded(op_md)) {
reorder_memory_ = new memory(op_md, engine);
auto* prim = FindOrCreateReorder<T>(user_memory_, reorder_memory_);
std::shared_ptr<stream> cpu_stream;
tsl::OneDnnThreadPool eigen_tp;
if (context != nullptr) {
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
eigen_tp =
tsl::OneDnnThreadPool(eigen_interface, ThreadPoolUseCallerThread());
cpu_stream.reset(CreateStream(&eigen_tp, prim->GetEngine()));
} else {
cpu_stream.reset(CreateStream(nullptr, prim->GetEngine()));
}
std::vector<primitive> net;
net.push_back(*(prim->GetPrimitive()));
std::vector<MemoryArgsMap> net_args;
net_args.push_back(
{{DNNL_ARG_FROM, *user_memory_}, {DNNL_ARG_TO, *reorder_memory_}});
execute_primitives(net, cpu_stream, net_args);
return true;
}
return false;
}
inline bool CheckReorderToOpMem(const memory::desc& op_md,
void* reorder_data_handle,
std::vector<primitive>& net,
std::vector<MemoryArgsMap>& net_args,
const engine& engine) {
DCHECK(reorder_data_handle);
DCHECK(user_memory_);
if (IsReorderNeeded(op_md)) {
reorder_memory_ = new memory(op_md, engine, reorder_data_handle);
net.push_back(CreateReorder(user_memory_, reorder_memory_));
net_args.push_back(MemoryArgsMap{{DNNL_ARG_FROM, *user_memory_},
{DNNL_ARG_TO, *reorder_memory_}});
return true;
}
return false;
}
inline bool CheckReorderToOpMem(const memory::desc& op_md,
void* reorder_data_handle,
const engine& engine,
OpKernelContext* context = nullptr) {
DCHECK(reorder_data_handle);
DCHECK(user_memory_);
if (IsReorderNeeded(op_md)) {
reorder_memory_ = new memory(op_md, engine, reorder_data_handle);
auto* prim = FindOrCreateReorder<T>(user_memory_, reorder_memory_);
std::shared_ptr<stream> cpu_stream;
tsl::OneDnnThreadPool eigen_tp;
if (context != nullptr) {
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
eigen_tp =
tsl::OneDnnThreadPool(eigen_interface, ThreadPoolUseCallerThread());
cpu_stream.reset(CreateStream(&eigen_tp, prim->GetEngine()));
} else {
cpu_stream.reset(CreateStream(nullptr, prim->GetEngine()));
}
std::vector<primitive> net;
net.push_back(*(prim->GetPrimitive()));
std::vector<MemoryArgsMap> net_args;
net_args.push_back(
{{DNNL_ARG_FROM, *user_memory_}, {DNNL_ARG_TO, *reorder_memory_}});
execute_primitives(net, cpu_stream, net_args);
return true;
}
return false;
}
inline bool CheckReorderToOpMem(const memory::desc& op_md,
Tensor* reorder_tensor,
std::vector<primitive>& net,
std::vector<MemoryArgsMap>& net_args,
const engine& engine) {
DCHECK(reorder_tensor);
return CheckReorderToOpMem(op_md, GetTensorBuffer(reorder_tensor), net,
net_args, engine);
}
inline bool CheckReorderToOpMem(const memory::desc& op_pd,
Tensor* reorder_tensor,
OpKernelContext* ctx = nullptr) {
DCHECK(reorder_tensor);
return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor),
*cpu_engine_, ctx);
}
inline bool PrepareReorderToUserMemIfReq(const memory::desc& op_pd) {
DCHECK(user_memory_);
if (IsReorderNeeded(op_pd)) {
reorder_memory_ = new memory(op_pd, *cpu_engine_);
return true;
}
return false;
}
inline void InsertReorderToUserMem(std::vector<primitive>& net,
std::vector<MemoryArgsMap>& net_args) {
DCHECK(user_memory_);
DCHECK(reorder_memory_);
net.push_back(CreateReorder(reorder_memory_, user_memory_));
net_args.push_back(MemoryArgsMap{{DNNL_ARG_FROM, *reorder_memory_},
{DNNL_ARG_TO, *user_memory_}});
}
inline void InsertReorderToUserMem(OpKernelContext* ctx = nullptr) {
DCHECK(user_memory_);
DCHECK(reorder_memory_);
DCHECK(cpu_engine_);
std::vector<primitive> net;
auto* prim = FindOrCreateReorder<T>(reorder_memory_, user_memory_);
net.push_back(*(prim->GetPrimitive()));
std::vector<MemoryArgsMap> net_args;
net_args.push_back(
{{DNNL_ARG_FROM, *reorder_memory_}, {DNNL_ARG_TO, *user_memory_}});
std::shared_ptr<stream> cpu_stream;
tsl::OneDnnThreadPool eigen_tp;
if (ctx != nullptr) {
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(ctx);
eigen_tp =
tsl::OneDnnThreadPool(eigen_interface, ThreadPoolUseCallerThread());
cpu_stream.reset(CreateStream(&eigen_tp, prim->GetEngine()));
} else {
cpu_stream.reset(CreateStream(nullptr, prim->GetEngine()));
}
execute_primitives(net, cpu_stream, net_args);
}
};
class MklPrimitive {
public:
virtual ~MklPrimitive() {}
MklPrimitive() {}
MklPrimitive(const engine& cpu_engine) { cpu_engine_ = cpu_engine; }
unsigned char* DummyData = nullptr;
engine cpu_engine_ = engine(engine::kind::cpu, 0);
const engine& GetEngine() { return cpu_engine_; }
};
const dnnl::memory::dims NONE_DIMS = {};
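// Least-recently-used cache used to memoize oneDNN primitives keyed by a
// string (built with FactoryKeyCreator below). GetOp() moves a hit to the
// front of the LRU list; SetOp() evicts the least recently used entry once
// capacity_ is reached. Entries own their primitives and delete them on
// eviction.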
template <typename T>
class LRUCache {
public:
explicit LRUCache(size_t capacity) {
capacity_ = capacity;
Clear();
}
T* GetOp(const string& key) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(lru_mu_);
#endif
auto it = cache_.find(key);
if (it == cache_.end()) {
return nullptr;
}
lru_list_.erase(it->second.lru_iterator);
lru_list_.push_front(it->first);
it->second.lru_iterator = lru_list_.begin();
return it->second.op;
}
void SetOp(const string& key, T* op) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(lru_mu_);
#endif
if (lru_list_.size() >= capacity_) {
Delete();
}
lru_list_.push_front(key);
Entry entry(op, lru_list_.begin());
cache_.emplace(std::make_pair(key, std::move(entry)));
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
FinishedAllocation(key);
#endif
}
void Clear() {
if (lru_list_.empty()) return;
cache_.clear();
lru_list_.clear();
}
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
bool IsAllocating(const string& key) {
mutex_lock lock(in_flight_mu_);
return in_flight_.find(key) != in_flight_.end();
}
void Allocate(const string& key) {
mutex_lock lock(in_flight_mu_);
in_flight_.insert(key);
}
void FinishedAllocation(const string& key) {
mutex_lock lock(in_flight_mu_);
in_flight_.erase(key);
}
#endif
private:
struct Entry {
T* op;
std::list<string>::iterator lru_iterator;
Entry(T* op, std::list<string>::iterator it) {
this->op = op;
this->lru_iterator = it;
}
Entry(Entry&& source) noexcept
: lru_iterator(std::move(source.lru_iterator)) {
op = std::move(source.op);
source.op = std::forward<T*>(nullptr);
}
~Entry() {
if (op != nullptr) delete op;
}
};
bool Delete() {
if (lru_list_.empty()) return false;
string key = lru_list_.back();
lru_list_.pop_back();
cache_.erase(key);
return true;
}
size_t capacity_;
std::unordered_map<string, Entry> cache_;
std::list<string> lru_list_;
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex lru_mu_;
std::set<string> in_flight_;
TF_GUARDED_BY(in_flight_mu_)
mutex in_flight_mu_;
#endif
};
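// Per-type factory fronting the LRUCache above. On AArch64/ACL builds with
// OpenMP the cache is shared between threads, so GetOp() also tracks keys
// whose primitives are still being constructed and blocks other threads on a
// condition variable until the owning thread publishes the result via
// SetOp(); on other builds the cache is thread-local and no locking is done.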
template <typename T>
class MklPrimitiveFactory {
public:
MklPrimitiveFactory() {}
~MklPrimitiveFactory() {}
MklPrimitive* GetOp(const string& key) {
#if !defined(DNNL_AARCH64_USE_ACL) || !defined(ENABLE_ONEDNN_OPENMP)
auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache();
return lru_cache.GetOp(key);
#else
while (true) {
mutex_lock lock(primitive_creation_mu_);
auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache();
MklPrimitive* primitive = lru_cache.GetOp(key);
if (primitive != nullptr) {
return primitive;
}
if (!lru_cache.IsAllocating(key)) {
lru_cache.Allocate(key);
return nullptr;
}
primitive_creation_cv_.wait(lock);
}
#endif
}
void SetOp(const string& key, MklPrimitive* op) {
#if !defined(DNNL_AARCH64_USE_ACL) || !defined(ENABLE_ONEDNN_OPENMP)
auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache();
lru_cache.SetOp(key, op);
#else
{
mutex_lock lock(primitive_creation_mu_);
auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache();
lru_cache.SetOp(key, op);
}
primitive_creation_cv_.notify_all();
#endif
}
static inline bool IsLegacyPlatform() {
#ifdef DNNL_AARCH64_USE_ACL
return false;
#else
static const bool is_legacy_platform =
(!port::TestCPUFeature(port::CPUFeature::AVX512F) &&
!port::TestCPUFeature(port::CPUFeature::AVX2));
return is_legacy_platform;
#endif
}
static inline bool IsPrimitiveMemOptEnabled() {
static const bool is_primitive_mem_opt_enabled = [] {
bool value = true;
TF_CHECK_OK(
ReadBoolFromEnvVar("TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE", true, &value));
return value;
}();
return is_primitive_mem_opt_enabled;
}
#ifdef DNNL_AARCH64_USE_ACL
static int IncrementCounter() {
static std::atomic_int counter{1};
return counter.fetch_add(1);
}
#endif
private:
static inline LRUCache<MklPrimitive>& GetLRUCache() {
static const int kCapacity = 1024;
#if !defined(DNNL_AARCH64_USE_ACL) || !defined(ENABLE_ONEDNN_OPENMP)
static thread_local LRUCache<MklPrimitive> lru_cache_(kCapacity);
#else
static LRUCache<MklPrimitive> lru_cache_(kCapacity);
#endif
return lru_cache_;
}
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_creation_mu_;
condition_variable primitive_creation_cv_;
#endif
};
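// Builds the lookup key for the primitive caches by appending the raw bytes
// of each field, separated by 'x'. An illustrative (hypothetical) use when
// caching a reorder might look like:
//   FactoryKeyCreator key_creator;
//   key_creator.AddAsKey(string("reorder"));
//   key_creator.AddAsKey(src_dims);                        // memory::dims
//   key_creator.AddAsKey<int>(static_cast<int>(src_dtype));
//   string key = key_creator.GetKey();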
class FactoryKeyCreator {
public:
FactoryKeyCreator() { key_.reserve(kMaxKeyLength); }
~FactoryKeyCreator() {}
void AddAsKey(const string& str) { Append(str); }
void AddAsKey(const dnnl::memory::dims& dims) {
for (unsigned int i = 0; i < dims.size(); i++) {
AddAsKey<int>(dims[i]);
}
}
template <typename T>
void AddAsKey(const T data) {
auto buffer = reinterpret_cast<const char*>(&data);
Append(StringPiece(buffer, sizeof(T)));
}
void AddAsKey(const void* data) {
auto buffer = reinterpret_cast<const char*>(&data);
Append(StringPiece(buffer, sizeof(data)));
}
string GetKey() { return key_; }
private:
string key_;
const char delimiter = 'x';
const int kMaxKeyLength = 256;
void Append(StringPiece s) {
key_.append(string(s));
key_.append(1, delimiter);
}
};
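// Cached reorder primitive: holds placeholder src/dst memory objects created
// with DummyData so the same dnnl::reorder can be reused; SetMemory() rebinds
// them to the actual buffers before each execution.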
class MklReorderPrimitive : public MklPrimitive {
public:
explicit MklReorderPrimitive(const memory* from, const memory* to)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
Setup(from, to);
}
~MklReorderPrimitive() {}
std::shared_ptr<primitive> GetPrimitive() { return context_.reorder_prim; }
void SetMemory(const memory* from, const memory* to) {
context_.src_mem->set_data_handle(from->get_data_handle());
context_.dst_mem->set_data_handle(to->get_data_handle());
}
std::shared_ptr<dnnl::stream> GetStream() { return stream_; }
private:
struct ReorderContext {
std::shared_ptr<dnnl::memory> src_mem;
std::shared_ptr<dnnl::memory> dst_mem;
std::shared_ptr<primitive> reorder_prim;
ReorderContext()
: src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) {}
} context_;
std::shared_ptr<dnnl::stream> stream_;
void Setup(const memory* from, const memory* to) {
context_.src_mem.reset(
new memory(from->get_desc(), cpu_engine_, DummyData));
context_.dst_mem.reset(new memory(to->get_desc(), cpu_engine_, DummyData));
context_.reorder_prim = std::make_shared<dnnl::reorder>(
reorder(*context_.src_mem, *context_.dst_mem));
stream_.reset(new stream(cpu_engine_));
}
};
template <typename T>
class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> {
public:
static MklReorderPrimitive* Get(const memory* from, const memory* to) {
auto reorderPrim = static_cast<MklReorderPrimitive*>(
MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to));
if (reorderPrim == nullptr) {
reorderPrim = new MklReorderPrimitive(from, to);
MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(from, to,
reorderPrim);
}
reorderPrim->SetMemory(from, to);
return reorderPrim;
}
static MklReorderPrimitiveFactory& GetInstance() {
static MklReorderPrimitiveFactory instance_;
return instance_;
}
static string CreateKey(const memory* from, const memory* to) {
string prefix = "reorder";
FactoryKeyCreator key_creator;
auto const& from_desc = from->GET_MEMORY_DESC;
auto const& to_desc = to->GET_MEMORY_DESC;
memory::dims INIT_DIMS_FROM_DESC(from_dims, from_desc);
memory::dims INIT_DIMS_FROM_DESC(to_dims, to_desc);
auto from_strides = from_desc.GET_STRIDES;
auto from_inner_nblks = from_desc.GET_INNER_NBLKS;
auto from_inner_blks = from_desc.GET_INNER_BLKS;
auto from_inner_idxs = from_desc.GET_INNER_IDXS;
auto to_inner_nblks = to_desc.GET_INNER_NBLKS;
auto to_inner_blks = to_desc.GET_INNER_BLKS;
auto to_inner_idxs = to_desc.GET_INNER_IDXS;
auto to_strides = to_desc.GET_STRIDES;
#ifndef ENABLE_ONEDNN_V3
memory::dims from_inner_blks_1(from_inner_blks,
&from_inner_blks[from_inner_nblks]);
memory::dims from_inner_idxs_1(from_inner_idxs,
&from_inner_idxs[from_inner_nblks]);
memory::dims to_inner_blks_1(to_inner_blks, &to_inner_blks[to_inner_nblks]);
memory::dims to_inner_idxs_1(to_inner_idxs, &to_inner_idxs[to_inner_nblks]);
memory::dims from_strides_outer_blocks(from_strides,
&from_strides[from_desc.ndims]);
memory::dims to_strides_outer_blocks(to_strides,
&to_strides[to_desc.ndims]);
#endif
key_creator.AddAsKey(prefix);
#ifdef DNNL_AARCH64_USE_ACL
key_creator.AddAsKey(std::this_thread::get_id());
#endif
key_creator.AddAsKey(static_cast<int>(from_desc.GET_MEMORY_DESC_FLAGS));
key_creator.AddAsKey(static_cast<int>(from_inner_nblks));
key_creator.AddAsKey(GET_INNER_DIMS(from_inner_blks, from_inner_blks_1));
key_creator.AddAsKey(GET_INNER_DIMS(from_inner_idxs, from_inner_idxs_1));
key_creator.AddAsKey(static_cast<int>(from_desc.GET_DATA_TYPE));
key_creator.AddAsKey(from_dims);
key_creator.AddAsKey(
GET_STRIDES_DIMS(from_strides, from_strides_outer_blocks));
key_creator.AddAsKey(static_cast<int>(to_desc.GET_MEMORY_DESC_FLAGS));
key_creator.AddAsKey(static_cast<int>(to_inner_nblks));
key_creator.AddAsKey(GET_INNER_DIMS(to_inner_blks, to_inner_blks_1));
key_creator.AddAsKey(GET_INNER_DIMS(to_inner_idxs, to_inner_idxs_1));
key_creator.AddAsKey(static_cast<int>(to_desc.GET_DATA_TYPE));
key_creator.AddAsKey(to_dims);
key_creator.AddAsKey(GET_STRIDES_DIMS(to_strides, to_strides_outer_blocks));
return key_creator.GetKey();
}
private:
MklReorderPrimitiveFactory() {}
~MklReorderPrimitiveFactory() {}
MklPrimitive* GetReorder(const memory* from, const memory* to) {
string key = CreateKey(from, to);
return this->GetOp(key);
}
void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
string key = CreateKey(from, to);
this->SetOp(key, op);
}
};
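// Convenience wrapper around the reorder factory: returns a cached reorder
// primitive for (from, to), creating one only on a cache miss; SetMemory()
// re-binds the data handles on every call. A minimal, hypothetical call site
// might look like:
//   auto* reorder = FindOrCreateReorder<float>(&src_mem, &dst_mem);
//   reorder->GetPrimitive()->execute(
//       *reorder->GetStream(),
//       {{DNNL_ARG_FROM, src_mem}, {DNNL_ARG_TO, dst_mem}});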
template <typename T>
inline MklReorderPrimitive* FindOrCreateReorder(const memory* from,
const memory* to) {
CHECK_NOTNULL(from);
CHECK_NOTNULL(to);
MklReorderPrimitive* reorder_prim =
MklReorderPrimitiveFactory<T>::Get(from, to);
return reorder_prim;
}
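// Returns true for a 1x1 convolution filter (spatial dims at indices 2 and 3
// both equal to 1) whose stride differs from 1 in at least one spatial
// dimension.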
inline bool IsConv1x1StrideNot1(memory::dims filter_dims,
memory::dims strides) {
if (filter_dims.size() != 4 || strides.size() != 2) return false;
return ((filter_dims[2] == 1) && (filter_dims[3] == 1) &&
((strides[0] != 1) || (strides[1] != 1)));
}
#undef ARE_MEMORY_DESCS_EQUAL
#undef CREATE_MEMORY_DESC_USING_STRIDES
#undef GET_DATA_TYPE
#undef GET_DIMS
#undef GET_INNER_BLKS
#undef GET_INNER_DIMS
#undef GET_INNER_IDXS
#undef GET_INNER_NBLKS
#undef GET_MEMORY_DESC
#undef GET_MEMORY_DESC_FLAGS
#undef GET_MEMORY_DESC_USING_MKLDNN_SHAPE_PTR
#undef GET_NDIMS
#undef GET_STRIDES
#undef GET_STRIDES_DIMS
#undef INIT_DIMS_FROM_DESC
#undef MEMORY_DESC
}
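// Helper macros for registering MKL kernel tests for each supported input
// type.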
#define REGISTER_TEST_FLOAT32(TEST) REGISTER_TEST(TEST, DT_FLOAT, Float32Input);
#define REGISTER_TEST_BFLOAT16(TEST) \
REGISTER_TEST(TEST, DT_BFLOAT16, BFloat16Input);
#define REGISTER_TEST_ALL_TYPES(TEST) \
REGISTER_TEST_FLOAT32(TEST); \
REGISTER_TEST_BFLOAT16(TEST);
#else
#define REGISTER_TEST_ALL_TYPES(TEST) REGISTER_TEST_FLOAT32(TEST);
#endif
#endif | #ifdef INTEL_MKL
#include "tensorflow/core/util/mkl_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
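// Checks that MklDnnShape::GetTfShape() returns the TensorFlow shape
// corresponding to the layout recorded by SetTfLayout() (NCHW vs. NHWC).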
TEST(MklUtilTest, MklDnnTfShape) {
auto cpu_engine = engine(engine::kind::cpu, 0);
MklDnnData<float> a(&cpu_engine);
const int N = 1, C = 2, H = 3, W = 4;
memory::dims a_dims = {N, C, H, W};
MklDnnShape a_mkldnn_shape;
a_mkldnn_shape.SetMklTensor(true);
a_mkldnn_shape.SetTfLayout(a_dims.size(), a_dims,
MklTensorFormat::FORMAT_NCHW);
TensorShape a_tf_shape_nchw({N, C, H, W});
TensorShape a_tf_shape_nhwc({N, H, W, C});
TensorShape a_mkldnn_tf_shape = a_mkldnn_shape.GetTfShape();
EXPECT_EQ(a_tf_shape_nchw, a_mkldnn_tf_shape);
EXPECT_NE(a_tf_shape_nhwc, a_mkldnn_tf_shape);
memory::dims b_dims = {N, C, H, W};
MklDnnShape b_mkldnn_shape;
b_mkldnn_shape.SetMklTensor(true);
b_mkldnn_shape.SetTfLayout(b_dims.size(), b_dims,
MklTensorFormat::FORMAT_NHWC);
TensorShape b_tf_shape_nhwc({N, H, W, C});
TensorShape b_tf_shape_nchw({N, C, H, W});
TensorShape b_mkldnn_tf_shape = b_mkldnn_shape.GetTfShape();
EXPECT_EQ(b_tf_shape_nhwc, b_mkldnn_tf_shape);
EXPECT_NE(b_tf_shape_nchw, b_mkldnn_tf_shape);
}
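// Checks LRU eviction and Clear(): after inserting capacity + 10 entries the
// 10 oldest ones are evicted, the newest `capacity` entries are still
// retrievable, and Clear() empties the cache.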
TEST(MklUtilTest, LRUCacheTest) {
size_t capacity = 100;
size_t num_objects = capacity + 10;
LRUCache<int> lru_cache(capacity);
for (int k = 0; k < num_objects; k++) {
lru_cache.SetOp(std::to_string(k), new int(k));
}
for (int k = 0; k < num_objects - capacity; ++k) {
EXPECT_EQ(nullptr, lru_cache.GetOp(std::to_string(k)));
}
for (int k = num_objects - capacity; k < num_objects; ++k) {
int* int_ptr = lru_cache.GetOp(std::to_string(k));
EXPECT_NE(nullptr, int_ptr);
EXPECT_EQ(*int_ptr, k);
}
lru_cache.Clear();
for (int k = 0; k < num_objects; ++k) {
EXPECT_EQ(nullptr, lru_cache.GetOp(std::to_string(k)));
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/mkl_util.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/mkl_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |